/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "code/SCCache.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "crc32c.h"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

static const Assembler::Condition reverse[] = {
    Assembler::noOverflow   /* overflow      = 0x0 */ ,
    Assembler::overflow     /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual   /* carrySet      = 0x2, below         = 0x2 */ ,
    Assembler::below        /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
    Assembler::notZero      /* zero          = 0x4, equal         = 0x4 */ ,
    Assembler::zero         /* notZero       = 0x5, notEqual      = 0x5 */ ,
    Assembler::above        /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual   /* above         = 0x7 */ ,
    Assembler::positive     /* negative      = 0x8 */ ,
    Assembler::negative     /* positive      = 0x9 */ ,
    Assembler::noParity     /* parity        = 0xa */ ,
    Assembler::parity       /* noParity      = 0xb */ ,
    Assembler::greaterEqual /* less          = 0xc */ ,
    Assembler::less         /* greaterEqual  = 0xd */ ,
    Assembler::greater      /* lessEqual     = 0xe */ ,
    Assembler::lessEqual    /* greater       = 0xf, */
};
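// For illustration: indexing this table with a condition yields its negation,
// e.g. reverse[Assembler::zero] == Assembler::notZero and
// reverse[Assembler::below] == Assembler::aboveEqual, which lets callers flip
// a branch's sense without recomputing the flags.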

// Implementation of MacroAssembler

// First all the versions that have distinct versions depending on 32/64 bit
// Unless the difference is trivial (1 line or so).

#ifndef _LP64

// 32bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}

Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  assert(rscratch == noreg, "");
  return Address::make_array(adr);
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments) {
  call(RuntimeAddress(entry_point));
  increment(rsp, number_of_arguments * wordSize);
}

void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Address src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Register src1, jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::extend_sign(Register hi, Register lo) {
  // According to Intel Doc. AP-526, "Integer Divide", p.18.
  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
    cdql();
  } else {
    movl(hi, lo);
    sarl(hi, 31);
  }
}

void MacroAssembler::jC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::parity, L);
}

void MacroAssembler::jnC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::noParity, L);
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  assert(rscratch == noreg, "not needed");
  jmp(as_Address(entry, noreg));
}
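// Worked example of the Java long-compare semantics implemented below
// (result is -1, 0 or 1, left in x_hi):
//   x = 0x0000000100000000 (2^32), y = 0x0000000000000001 (1)
//   => x_hi (1) > y_hi (0), so the signed high-word compare alone decides: 1.
// Only when x_hi == y_hi does the unsigned low-word compare run.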
// Note: y_lo will be destroyed
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  // Long compare for Java (semantics as described in JVM spec.)
  Label high, low, done;

  cmpl(x_hi, y_hi);
  jcc(Assembler::less, low);
  jcc(Assembler::greater, high);
  // x_hi is the return register
  xorl(x_hi, x_hi);
  cmpl(x_lo, y_lo);
  jcc(Assembler::below, low);
  jcc(Assembler::equal, done);

  bind(high);
  xorl(x_hi, x_hi);
  increment(x_hi);
  jmp(done);

  bind(low);
  xorl(x_hi, x_hi);
  decrementl(x_hi);

  bind(done);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  assert(rscratch == noreg, "not needed");

  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t)adr.target(), adr.rspec());
}

void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  //
  // rsp ---> [  ??  ] \               \
  //            ....    | y_rsp_offset  |
  //          [ y_lo ] /  (in bytes)    | x_rsp_offset
  //          [ y_hi ]                  | (in bytes)
  //            ....                    |
  //          [ x_lo ]                 /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);
  orl(rbx, rcx);                // rbx, = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);  // if rbx, = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);                   // x_hi * y_lo
  movl(rbx, rax);               // save lo(x_hi * y_lo) in rbx,
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);                    // x_lo * y_hi
  addl(rbx, rax);               // add lo(x_lo * y_hi) to rbx,
  // 3rd step
  bind(quick);                  // note: rbx, = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);                   // x_lo * y_lo
  addl(rdx, rbx);               // correct hi(x_lo * y_lo)
}

void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);
  negl(hi);
}

void MacroAssembler::lshl(Register hi, Register lo) {
  // Java shift left long support (semantics as described in JVM spec., p.305)
  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  // shift value is in rcx !
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;  // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);            // s := s & 0x3f (s < 0x40)
  cmpl(s, n);               // if (s < n)
  jcc(Assembler::less, L);  // else (s >= n)
  movl(hi, lo);             // x := x << n
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                  // s (mod n) < n
  shldl(hi, lo);            // x := x << s
  shll(lo);
}
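// Example for the s >= n case handled above (n = 32), say s = 40:
//   hi := lo, lo := 0 performs the pre-shift by 32, and since the hardware
//   shift unit uses rcx mod 32 = 8, the shldl/shll pair shifts the remaining
//   8 bits: result = (x << 32) << 8 = x << 40, as required.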

void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;  // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);            // s := s & 0x3f (s < 0x40)
  cmpl(s, n);               // if (s < n)
  jcc(Assembler::less, L);  // else (s >= n)
  movl(lo, hi);             // x := x >> n
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                  // s (mod n) < n
  shrdl(lo, hi);            // x := x >> s
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  movl(as_Address(dst, noreg), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src, noreg));
}

void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  movl(dst, src);
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}
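// On 32-bit every pass_argN simply pushes, so the call_VM/call_VM_leaf
// wrappers further down invoke pass_arg2, pass_arg1, pass_arg0 in that
// order: arg_0 is pushed last and so ends up on top of the stack, where
// the C calling convention expects the first argument.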

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax,
                             int eip, char* msg) {
  // In order to get locks to work, we need to fake an in_VM state
  JavaThread* thread = JavaThread::current();
  JavaThreadState saved_state = thread->thread_state();
  thread->set_thread_state(_thread_in_vm);
  if (ShowMessageBoxOnError) {
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
  if ((WizardMode || Verbose) && PrintMiscellaneous) {
    tty->cr();
    findpc(eip);
    tty->cr();
  }
#endif
#define PRINT_REG(rax) \
  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
  PRINT_REG(rax);
  PRINT_REG(rbx);
  PRINT_REG(rcx);
  PRINT_REG(rdx);
  PRINT_REG(rdi);
  PRINT_REG(rsi);
  PRINT_REG(rbp);
  PRINT_REG(rsp);
#undef PRINT_REG
  // Print some words near the top of the stack.
  int* dump_sp = (int*) rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 16; row++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 8; col++) {
      tty->print(" 0x%08x", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)eip-64, (address)eip);
  tty->print_cr("--------");
  Disassembler::decode((address)eip, (address)eip+32);
}

void MacroAssembler::stop(const char* msg) {
  // push address of message
  ExternalAddress message((address)msg);
  pushptr(message.addr(), noreg);
  { Label L; call(L, relocInfo::none); bind(L); }  // push eip
  pusha();                                         // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  // push address of message
  ExternalAddress message((address)msg);
  pushptr(message.addr(), noreg);

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize);  // discard argument
  pop_CPU_state();
}

void MacroAssembler::print_state() {
  { Label L; call(L, relocInfo::none); bind(L); }  // push eip
  pusha();                                         // push registers

  push_CPU_state();
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
  pop_CPU_state();

  popa();
  addl(rsp, wordSize);
}

#else // _LP64

// 64 bit versions
rval"); 491 assert(reachable(adr), "must be"); 492 return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc()); 493 494 } 495 496 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) { 497 AddressLiteral base = adr.base(); 498 lea(rscratch, base); 499 Address index = adr.index(); 500 assert(index._disp == 0, "must not have disp"); // maybe it can? 501 Address array(rscratch, index._index, index._scale, index._disp); 502 return array; 503 } 504 505 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) { 506 Label L, E; 507 508 #ifdef _WIN64 509 // Windows always allocates space for it's register args 510 assert(num_args <= 4, "only register arguments supported"); 511 subq(rsp, frame::arg_reg_save_area_bytes); 512 #endif 513 514 // Align stack if necessary 515 testl(rsp, 15); 516 jcc(Assembler::zero, L); 517 518 subq(rsp, 8); 519 call(RuntimeAddress(entry_point)); 520 addq(rsp, 8); 521 jmp(E); 522 523 bind(L); 524 call(RuntimeAddress(entry_point)); 525 526 bind(E); 527 528 #ifdef _WIN64 529 // restore stack pointer 530 addq(rsp, frame::arg_reg_save_area_bytes); 531 #endif 532 533 } 534 535 void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) { 536 assert(!src2.is_lval(), "should use cmpptr"); 537 assert(rscratch != noreg || always_reachable(src2), "missing"); 538 539 if (reachable(src2)) { 540 cmpq(src1, as_Address(src2)); 541 } else { 542 lea(rscratch, src2); 543 Assembler::cmpq(src1, Address(rscratch, 0)); 544 } 545 } 546 547 int MacroAssembler::corrected_idivq(Register reg) { 548 // Full implementation of Java ldiv and lrem; checks for special 549 // case as described in JVM spec., p.243 & p.271. The function 550 // returns the (pc) offset of the idivl instruction - may be needed 551 // for implicit exceptions. 

int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271. The function
  // returns the (pc) offset of the idivl instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor   (may not be eax/edx)   -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx);  // prepare rdx for possible special case (where
                   // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}

void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}

void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}

void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    incrementq(as_Address(dst));
  } else {
    lea(rscratch, dst);
    incrementq(Address(rscratch, 0));
  }
}

void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}

void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}
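// Note the min_jint special case above: decrementq(reg, min_jint) cannot be
// rewritten as incrementq(reg, -min_jint) because negating 0x80000000
// overflows a 32-bit int (it has no positive counterpart), so that one value
// is handled with a direct subq/addq before the sign of `value` is inspected.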

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  lea(rscratch, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch;
  jmp(dispatch);
}

void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere();  // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  lea(rscratch, adr);
  movptr(dst, rscratch);
}

void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_int8((unsigned char)0xC9);  // LEAVE
}

void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere();  // 64bit doesn't use two regs
  negq(lo);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(dst, src);
      movq(dst, Address(dst, 0));
    }
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  movq(as_Address(dst, rscratch), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src, dst /*rscratch*/));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  if (is_simm32(src)) {
    movptr(dst, checked_cast<int32_t>(src));
  } else {
    mov64(rscratch, src);
    movq(dst, rscratch);
  }
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  movoop(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  mov_metadata(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  lea(rscratch, src);
  if (src.is_lval()) {
    push(rscratch);
  } else {
    pushq(Address(rscratch, 0));
  }
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  reset_last_Java_frame(r15_thread, clear_fp);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register rscratch) {
  set_last_Java_frame(r15_thread, last_java_sp, last_java_fp, last_java_pc, rscratch);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}
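// Unlike the push-based 32-bit versions above, these pass_argN helpers move
// values straight into the C argument registers (c_rarg0..c_rarg3, i.e.
// rdi/rsi/rdx/rcx on System V, rcx/rdx/r8/r9 on Win64), emitting nothing at
// all when the value already sits in the right register.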
void MacroAssembler::stop(const char* msg) {
  if (ShowMessageBoxOnError) {
    address rip = pc();
    pusha();  // get regs on stack
    lea(c_rarg1, InternalAddress(rip));
    movq(c_rarg2, rsp);  // pass pointer to regs array
  }
  lea(c_rarg0, ExternalAddress((address) msg));
  andq(rsp, -16);  // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
  SCCache::add_C_string(msg);
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);    // align stack as required by push_CPU_state and call
  push_CPU_state();  // keeps alignment at 16 bytes

  lea(c_rarg0, ExternalAddress((address) msg));
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();  // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);    // align stack as required by push_CPU_state and call
  push_CPU_state();  // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize));  // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}
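// print_state64 below indexes the array laid down by pusha(): the registers
// are pushed starting with rax, and since the stack grows down rax lands in
// the highest slot (regs[15]) while r15 ends up in regs[0]; &regs[16] is the
// value rsp held before the pusha.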

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  PRINT_REG(rsp, (intptr_t)(&regs[16]));
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near the top of the stack.
  int64_t* rsp = &regs[16];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
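// Example of the biasing above: incoming stack slot 0 maps to
// rbp + 4 * VMRegImpl::stack_slot_size = rbp + 16, i.e. just past the saved
// rbp and the return address (two 8-byte words, or four 4-byte slots).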

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
             src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}

// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}


// A float arg may have to do float reg int reg conversion
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}
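// Note that the reg-to-reg case above uses movdbl (movsd) even for a float;
// that is harmless, since it copies the low 64 bits of the XMM register and
// only the low 32 bits are significant for a float argument.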

// On 64 bit we will store integer like items to the stack as
// 64 bits items (x86_32/64 abi) even though java would only store
// 32bits for a parameter. On 32bit it will simply be 32 bits
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movq(rax, Address(rbp, reg2offset_in(src.first())));
      movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
void MacroAssembler::object_move(OopMap* map,
                                 int oop_handle_offset,
                                 int framesize_in_slots,
                                 VMRegPair src,
                                 VMRegPair dst,
                                 bool is_receiver,
                                 int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if oop is null if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
    lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a null
    cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmpptr(rOop, NULL_WORD);
    lea(rHandle, Address(rsp, offset));
    // conditionally move a null from the handle area where it was just stored
    cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it otherwise it is already in correct reg.
  if (dst.first()->is_stack()) {
    movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}
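// Handle example for the register case above: a non-null oop arriving in
// j_rarg1 with oop_handle_offset 0 is stored at
// rsp + 1 * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size, and
// rHandle receives that stack address. When the oop is null, the cmov
// overwrites rHandle with the null just stored, so the callee sees a null
// jobject rather than a handle to a null slot.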

#endif // _LP64

// Now versions that are common to 32/64 bit

void MacroAssembler::addptr(Register dst, int32_t imm32) {
  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
}

void MacroAssembler::addptr(Register dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addptr(Address dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    addss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    addss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addpd(dst, Address(rscratch, 0));
  }
}

// See 8273459. Function for ensuring 64-byte alignment, intended for stubs only.
// Stub code is generated once and never copied.
// NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
void MacroAssembler::align64() {
  align(64, (unsigned long long) pc());
}

void MacroAssembler::align32() {
  align(32, (unsigned long long) pc());
}

void MacroAssembler::align(int modulus) {
  // 8273459: Ensure alignment is possible with current segment alignment
  assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
  align(modulus, offset());
}

void MacroAssembler::align(int modulus, int target) {
  if (target % modulus != 0) {
    nop(modulus - (target % modulus));
  }
}
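// align(modulus, target) pads up to the next multiple of modulus, e.g.
// align(32, 13) emits 32 - (13 % 32) = 19 bytes of nops.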

void MacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}

void MacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void MacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}

void MacroAssembler::pop_d(XMMRegister r) {
  movdbl(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andpd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andps(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
}

#ifdef _LP64
void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    andq(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    andq(dst, Address(rscratch, 0));
  }
}
#endif

void MacroAssembler::atomic_incl(Address counter_addr) {
  lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incl(Address(rscratch, 0));
  }
}

#ifdef _LP64
void MacroAssembler::atomic_incq(Address counter_addr) {
  lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incq(Address(rscratch, 0));
  }
}
#endif

// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-(int)os::vm_page_size())), size);
  subptr(tmp, (int)os::vm_page_size());
  subl(size, (int)os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.)  Skip this address by starting at i=1, and
  // touch a few more pages below.  N.B.  It is important to touch all
  // the way down including all pages in the shadow zone.
  for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
    // this could be any sized move but this can be a debugging crumb
    // so the bigger the better.
    movptr(Address(tmp, (-i*(int)os::vm_page_size())), size);
  }
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(get_thread(rsi);)

  cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
  jcc(Assembler::below, no_reserved_zone_enabling);

  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
  jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);
  setb(Assembler::notZero, x);
}

// Wouldn't need if AddressLiteral version had new name
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}

void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}

void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
  assert(rscratch != noreg || always_reachable(entry), "missing");

  if (reachable(entry)) {
    Assembler::call_literal(entry.target(), entry.rspec());
  } else {
    lea(rscratch, entry);
    Assembler::call(rscratch);
  }
}

void MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
#ifdef _LP64
  // Needs full 64-bit immediate for later patching.
  mov64(rax, (int64_t)Universe::non_oop_word());
#else
  movptr(rax, (intptr_t)Universe::non_oop_word());
#endif
  call(AddressLiteral(entry, rh));
}

int MacroAssembler::ic_check_size() {
  return LP64_ONLY(14) NOT_LP64(12);
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
  Register data = rax;
  Register temp = LP64_ONLY(rscratch1) NOT_LP64(rbx);

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompressedClassPointers) {
    movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  } else {
    movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
  }

  // if inline cache check fails, then jump to runtime routine
  jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}
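// Example: with end_alignment = 32 and the 14-byte LP64 check size, the
// align() above positions the UEP so that offset() + 14 is a multiple of 32;
// after the check is emitted, offset() % 32 == 0 holds at the verified entry
// point, which is exactly what the assert at the end verifies.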

void MacroAssembler::emit_static_call_stub() {
  // Static stub relocation also tags the Method* in the code-stream.
  mov_metadata(rbx, (Metadata*) nullptr);  // Method is zapped till fixup time.
  // This is recognized as unresolved by relocs/nativeinst/ic code.
  jump(RuntimeAddress(pc()));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));

  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}
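// The call_VM flavors above all share one local trampoline shape: the near
// call to label C pushes a return address (which call_VM_helper folds into
// its last_Java_sp computation, see below), the trampoline body runs the
// helper, and ret(0) resumes at the jmp that skips to E, so the whole
// sequence behaves like a single call at the call site.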

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   int number_of_arguments,
                                   bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   bool check_exceptions) {
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   bool check_exceptions) {

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   Register arg_3,
                                   bool check_exceptions) {
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifdef _LP64
    java_thread = r15_thread;
#else
    java_thread = rdi;
    get_thread(java_thread);
#endif // LP64
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // r12 is the heapbase.
  LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
#endif // ASSERT

  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  NOT_LP64(push(java_thread); number_of_arguments++);
  LP64_ONLY(mov(c_rarg0, r15_thread));

  // set last Java frame before call
  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(java_thread, last_java_sp, rbp, nullptr, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

  // restore the thread (cannot use the pushed argument since arguments
  // may be overwritten by C code generated by an optimizing compiler);
  // however can use the register value directly if it is callee saved.
  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
    // rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
    guarantee(java_thread != rax, "change this code");
    push(rax);
    { Label L;
      get_thread(rax);
      cmpptr(java_thread, rax);
      jcc(Assembler::equal, L);
      STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
      bind(L);
    }
    pop(rax);
#endif
  } else {
    get_thread(java_thread);
  }
  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(java_thread, true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
#ifndef _LP64
    jump_cc(Assembler::notEqual,
            RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
#endif // LP64
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {

  // Calculating the value for last_Java_sp is somewhat subtle. call_VM does
  // an intermediate call which places a return address on the stack just
  // under the stack pointer as the user finished with it. This allows us to
  // retrieve last_Java_pc from last_Java_sp[-1].
  // On 32bit we then have to push additional args on the stack to accomplish
  // the actual requested call. On 64bit call_VM only can use register args
  // so the only extra space is the return address that call_VM created.
  // This hopefully explains the calculations here.

#ifdef _LP64
  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));
#else
  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
#endif // LP64

  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
}
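// Concretely, for a 32-bit call_VM with two register arguments the stack at
// this point is (top first) [arg_1][arg_2][return address], so
// rsp + (1 + 2) * wordSize recovers the rsp the caller had; on 64-bit the
// arguments travel in registers and only the intermediate call's return
// address sits on the stack, hence rsp + wordSize.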
void MacroAssembler::call_VM_leaf0(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 0);
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 4);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 4);
}

void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
}

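// Illustrative usage sketch (not a call site in this file): leaf calls pass
// up to four register arguments and must target entry points that neither
// safepoint nor throw, since no last Java frame is recorded. For example,
// the interpreter's on-stack-replacement code does roughly:
//
//   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
//                   osr_buf);
//
// Entry points that can throw or walk the stack must instead go through
// call_VM/call_VM_base above, which record the last Java frame and check
// for pending exceptions on return.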
1766 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { 1767 } 1768 1769 void MacroAssembler::check_and_handle_popframe(Register java_thread) { 1770 } 1771 1772 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) { 1773 assert(rscratch != noreg || always_reachable(src1), "missing"); 1774 1775 if (reachable(src1)) { 1776 cmpl(as_Address(src1), imm); 1777 } else { 1778 lea(rscratch, src1); 1779 cmpl(Address(rscratch, 0), imm); 1780 } 1781 } 1782 1783 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) { 1784 assert(!src2.is_lval(), "use cmpptr"); 1785 assert(rscratch != noreg || always_reachable(src2), "missing"); 1786 1787 if (reachable(src2)) { 1788 cmpl(src1, as_Address(src2)); 1789 } else { 1790 lea(rscratch, src2); 1791 cmpl(src1, Address(rscratch, 0)); 1792 } 1793 } 1794 1795 void MacroAssembler::cmp32(Register src1, int32_t imm) { 1796 Assembler::cmpl(src1, imm); 1797 } 1798 1799 void MacroAssembler::cmp32(Register src1, Address src2) { 1800 Assembler::cmpl(src1, src2); 1801 } 1802 1803 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1804 ucomisd(opr1, opr2); 1805 1806 Label L; 1807 if (unordered_is_less) { 1808 movl(dst, -1); 1809 jcc(Assembler::parity, L); 1810 jcc(Assembler::below , L); 1811 movl(dst, 0); 1812 jcc(Assembler::equal , L); 1813 increment(dst); 1814 } else { // unordered is greater 1815 movl(dst, 1); 1816 jcc(Assembler::parity, L); 1817 jcc(Assembler::above , L); 1818 movl(dst, 0); 1819 jcc(Assembler::equal , L); 1820 decrementl(dst); 1821 } 1822 bind(L); 1823 } 1824 1825 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1826 ucomiss(opr1, opr2); 1827 1828 Label L; 1829 if (unordered_is_less) { 1830 movl(dst, -1); 1831 jcc(Assembler::parity, L); 1832 jcc(Assembler::below , L); 1833 movl(dst, 0); 1834 jcc(Assembler::equal , L); 1835 increment(dst); 1836 } else { // unordered is greater 1837 movl(dst, 1); 1838 jcc(Assembler::parity, L); 1839 jcc(Assembler::above , L); 1840 movl(dst, 0); 1841 jcc(Assembler::equal , L); 1842 decrementl(dst); 1843 } 1844 bind(L); 1845 } 1846 1847 1848 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) { 1849 assert(rscratch != noreg || always_reachable(src1), "missing"); 1850 1851 if (reachable(src1)) { 1852 cmpb(as_Address(src1), imm); 1853 } else { 1854 lea(rscratch, src1); 1855 cmpb(Address(rscratch, 0), imm); 1856 } 1857 } 1858 1859 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) { 1860 #ifdef _LP64 1861 assert(rscratch != noreg || always_reachable(src2), "missing"); 1862 1863 if (src2.is_lval()) { 1864 movptr(rscratch, src2); 1865 Assembler::cmpq(src1, rscratch); 1866 } else if (reachable(src2)) { 1867 cmpq(src1, as_Address(src2)); 1868 } else { 1869 lea(rscratch, src2); 1870 Assembler::cmpq(src1, Address(rscratch, 0)); 1871 } 1872 #else 1873 assert(rscratch == noreg, "not needed"); 1874 if (src2.is_lval()) { 1875 cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); 1876 } else { 1877 cmpl(src1, as_Address(src2)); 1878 } 1879 #endif // _LP64 1880 } 1881 1882 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) { 1883 assert(src2.is_lval(), "not a mem-mem compare"); 1884 #ifdef _LP64 1885 // moves src2's literal address 1886 movptr(rscratch, src2); 1887 Assembler::cmpq(src1, rscratch); 1888 #else 1889 assert(rscratch == noreg, "not needed"); 1890 
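  // On 32-bit every address is directly reachable and an lval literal is
  // compared as a 32-bit immediate carrying its relocation, so no scratch
  // register is required.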
cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); 1891 #endif // _LP64 1892 } 1893 1894 void MacroAssembler::cmpoop(Register src1, Register src2) { 1895 cmpptr(src1, src2); 1896 } 1897 1898 void MacroAssembler::cmpoop(Register src1, Address src2) { 1899 cmpptr(src1, src2); 1900 } 1901 1902 #ifdef _LP64 1903 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) { 1904 movoop(rscratch, src2); 1905 cmpptr(src1, rscratch); 1906 } 1907 #endif 1908 1909 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) { 1910 assert(rscratch != noreg || always_reachable(adr), "missing"); 1911 1912 if (reachable(adr)) { 1913 lock(); 1914 cmpxchgptr(reg, as_Address(adr)); 1915 } else { 1916 lea(rscratch, adr); 1917 lock(); 1918 cmpxchgptr(reg, Address(rscratch, 0)); 1919 } 1920 } 1921 1922 void MacroAssembler::cmpxchgptr(Register reg, Address adr) { 1923 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr)); 1924 } 1925 1926 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1927 assert(rscratch != noreg || always_reachable(src), "missing"); 1928 1929 if (reachable(src)) { 1930 Assembler::comisd(dst, as_Address(src)); 1931 } else { 1932 lea(rscratch, src); 1933 Assembler::comisd(dst, Address(rscratch, 0)); 1934 } 1935 } 1936 1937 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 1938 assert(rscratch != noreg || always_reachable(src), "missing"); 1939 1940 if (reachable(src)) { 1941 Assembler::comiss(dst, as_Address(src)); 1942 } else { 1943 lea(rscratch, src); 1944 Assembler::comiss(dst, Address(rscratch, 0)); 1945 } 1946 } 1947 1948 1949 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) { 1950 assert(rscratch != noreg || always_reachable(counter_addr), "missing"); 1951 1952 Condition negated_cond = negate_condition(cond); 1953 Label L; 1954 jcc(negated_cond, L); 1955 pushf(); // Preserve flags 1956 atomic_incl(counter_addr, rscratch); 1957 popf(); 1958 bind(L); 1959 } 1960 1961 int MacroAssembler::corrected_idivl(Register reg) { 1962 // Full implementation of Java idiv and irem; checks for 1963 // special case as described in JVM spec., p.243 & p.271. 1964 // The function returns the (pc) offset of the idivl 1965 // instruction - may be needed for implicit exceptions. 
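  // (Worked example, for illustration: min_int / -1 overflows idivl and
  // would raise a hardware #DE; the special-case path below skips idivl
  // entirely and produces quotient min_int with remainder 0, as Java
  // division semantics require.)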
1966 // 1967 // normal case special case 1968 // 1969 // input : rax,: dividend min_int 1970 // reg: divisor (may not be rax,/rdx) -1 1971 // 1972 // output: rax,: quotient (= rax, idiv reg) min_int 1973 // rdx: remainder (= rax, irem reg) 0 1974 assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register"); 1975 const int min_int = 0x80000000; 1976 Label normal_case, special_case; 1977 1978 // check for special case 1979 cmpl(rax, min_int); 1980 jcc(Assembler::notEqual, normal_case); 1981 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0) 1982 cmpl(reg, -1); 1983 jcc(Assembler::equal, special_case); 1984 1985 // handle normal case 1986 bind(normal_case); 1987 cdql(); 1988 int idivl_offset = offset(); 1989 idivl(reg); 1990 1991 // normal and special case exit 1992 bind(special_case); 1993 1994 return idivl_offset; 1995 } 1996 1997 1998 1999 void MacroAssembler::decrementl(Register reg, int value) { 2000 if (value == min_jint) {subl(reg, value) ; return; } 2001 if (value < 0) { incrementl(reg, -value); return; } 2002 if (value == 0) { ; return; } 2003 if (value == 1 && UseIncDec) { decl(reg) ; return; } 2004 /* else */ { subl(reg, value) ; return; } 2005 } 2006 2007 void MacroAssembler::decrementl(Address dst, int value) { 2008 if (value == min_jint) {subl(dst, value) ; return; } 2009 if (value < 0) { incrementl(dst, -value); return; } 2010 if (value == 0) { ; return; } 2011 if (value == 1 && UseIncDec) { decl(dst) ; return; } 2012 /* else */ { subl(dst, value) ; return; } 2013 } 2014 2015 void MacroAssembler::division_with_shift (Register reg, int shift_value) { 2016 assert(shift_value > 0, "illegal shift value"); 2017 Label _is_positive; 2018 testl (reg, reg); 2019 jcc (Assembler::positive, _is_positive); 2020 int offset = (1 << shift_value) - 1 ; 2021 2022 if (offset == 1) { 2023 incrementl(reg); 2024 } else { 2025 addl(reg, offset); 2026 } 2027 2028 bind (_is_positive); 2029 sarl(reg, shift_value); 2030 } 2031 2032 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2033 assert(rscratch != noreg || always_reachable(src), "missing"); 2034 2035 if (reachable(src)) { 2036 Assembler::divsd(dst, as_Address(src)); 2037 } else { 2038 lea(rscratch, src); 2039 Assembler::divsd(dst, Address(rscratch, 0)); 2040 } 2041 } 2042 2043 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2044 assert(rscratch != noreg || always_reachable(src), "missing"); 2045 2046 if (reachable(src)) { 2047 Assembler::divss(dst, as_Address(src)); 2048 } else { 2049 lea(rscratch, src); 2050 Assembler::divss(dst, Address(rscratch, 0)); 2051 } 2052 } 2053 2054 void MacroAssembler::enter() { 2055 push(rbp); 2056 mov(rbp, rsp); 2057 } 2058 2059 void MacroAssembler::post_call_nop() { 2060 if (!Continuations::enabled()) { 2061 return; 2062 } 2063 InstructionMark im(this); 2064 relocate(post_call_nop_Relocation::spec()); 2065 InlineSkippedInstructionsCounter skipCounter(this); 2066 emit_int8((uint8_t)0x0f); 2067 emit_int8((uint8_t)0x1f); 2068 emit_int8((uint8_t)0x84); 2069 emit_int8((uint8_t)0x00); 2070 emit_int32(0x00); 2071 } 2072 2073 // A 5 byte nop that is safe for patching (see patch_verified_entry) 2074 void MacroAssembler::fat_nop() { 2075 if (UseAddressNop) { 2076 addr_nop_5(); 2077 } else { 2078 emit_int8((uint8_t)0x26); // es: 2079 emit_int8((uint8_t)0x2e); // cs: 2080 emit_int8((uint8_t)0x64); // fs: 2081 emit_int8((uint8_t)0x65); // gs: 2082 emit_int8((uint8_t)0x90); 2083 } 2084 } 2085 2086 #ifndef _LP64 2087 void 
MacroAssembler::fcmp(Register tmp) { 2088 fcmp(tmp, 1, true, true); 2089 } 2090 2091 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) { 2092 assert(!pop_right || pop_left, "usage error"); 2093 if (VM_Version::supports_cmov()) { 2094 assert(tmp == noreg, "unneeded temp"); 2095 if (pop_left) { 2096 fucomip(index); 2097 } else { 2098 fucomi(index); 2099 } 2100 if (pop_right) { 2101 fpop(); 2102 } 2103 } else { 2104 assert(tmp != noreg, "need temp"); 2105 if (pop_left) { 2106 if (pop_right) { 2107 fcompp(); 2108 } else { 2109 fcomp(index); 2110 } 2111 } else { 2112 fcom(index); 2113 } 2114 // convert FPU condition into eflags condition via rax, 2115 save_rax(tmp); 2116 fwait(); fnstsw_ax(); 2117 sahf(); 2118 restore_rax(tmp); 2119 } 2120 // condition codes set as follows: 2121 // 2122 // CF (corresponds to C0) if x < y 2123 // PF (corresponds to C2) if unordered 2124 // ZF (corresponds to C3) if x = y 2125 } 2126 2127 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) { 2128 fcmp2int(dst, unordered_is_less, 1, true, true); 2129 } 2130 2131 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) { 2132 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right); 2133 Label L; 2134 if (unordered_is_less) { 2135 movl(dst, -1); 2136 jcc(Assembler::parity, L); 2137 jcc(Assembler::below , L); 2138 movl(dst, 0); 2139 jcc(Assembler::equal , L); 2140 increment(dst); 2141 } else { // unordered is greater 2142 movl(dst, 1); 2143 jcc(Assembler::parity, L); 2144 jcc(Assembler::above , L); 2145 movl(dst, 0); 2146 jcc(Assembler::equal , L); 2147 decrementl(dst); 2148 } 2149 bind(L); 2150 } 2151 2152 void MacroAssembler::fld_d(AddressLiteral src) { 2153 fld_d(as_Address(src)); 2154 } 2155 2156 void MacroAssembler::fld_s(AddressLiteral src) { 2157 fld_s(as_Address(src)); 2158 } 2159 2160 void MacroAssembler::fldcw(AddressLiteral src) { 2161 fldcw(as_Address(src)); 2162 } 2163 2164 void MacroAssembler::fpop() { 2165 ffree(); 2166 fincstp(); 2167 } 2168 2169 void MacroAssembler::fremr(Register tmp) { 2170 save_rax(tmp); 2171 { Label L; 2172 bind(L); 2173 fprem(); 2174 fwait(); fnstsw_ax(); 2175 sahf(); 2176 jcc(Assembler::parity, L); 2177 } 2178 restore_rax(tmp); 2179 // Result is in ST0. 
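  // (fprem only partially reduces ST0 modulo ST1 on each iteration and sets
  // C2 in the FPU status word while the reduction is incomplete; after
  // fnstsw/sahf that bit lands in the parity flag, which is why the loop
  // above retries on jcc(Assembler::parity, L).)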
2180 // Note: fxch & fpop to get rid of ST1 2181 // (otherwise FPU stack could overflow eventually) 2182 fxch(1); 2183 fpop(); 2184 } 2185 2186 void MacroAssembler::empty_FPU_stack() { 2187 if (VM_Version::supports_mmx()) { 2188 emms(); 2189 } else { 2190 for (int i = 8; i-- > 0; ) ffree(i); 2191 } 2192 } 2193 #endif // !LP64 2194 2195 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2196 assert(rscratch != noreg || always_reachable(src), "missing"); 2197 if (reachable(src)) { 2198 Assembler::mulpd(dst, as_Address(src)); 2199 } else { 2200 lea(rscratch, src); 2201 Assembler::mulpd(dst, Address(rscratch, 0)); 2202 } 2203 } 2204 2205 void MacroAssembler::load_float(Address src) { 2206 #ifdef _LP64 2207 movflt(xmm0, src); 2208 #else 2209 if (UseSSE >= 1) { 2210 movflt(xmm0, src); 2211 } else { 2212 fld_s(src); 2213 } 2214 #endif // LP64 2215 } 2216 2217 void MacroAssembler::store_float(Address dst) { 2218 #ifdef _LP64 2219 movflt(dst, xmm0); 2220 #else 2221 if (UseSSE >= 1) { 2222 movflt(dst, xmm0); 2223 } else { 2224 fstp_s(dst); 2225 } 2226 #endif // LP64 2227 } 2228 2229 void MacroAssembler::load_double(Address src) { 2230 #ifdef _LP64 2231 movdbl(xmm0, src); 2232 #else 2233 if (UseSSE >= 2) { 2234 movdbl(xmm0, src); 2235 } else { 2236 fld_d(src); 2237 } 2238 #endif // LP64 2239 } 2240 2241 void MacroAssembler::store_double(Address dst) { 2242 #ifdef _LP64 2243 movdbl(dst, xmm0); 2244 #else 2245 if (UseSSE >= 2) { 2246 movdbl(dst, xmm0); 2247 } else { 2248 fstp_d(dst); 2249 } 2250 #endif // LP64 2251 } 2252 2253 // dst = c = a * b + c 2254 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 2255 Assembler::vfmadd231sd(c, a, b); 2256 if (dst != c) { 2257 movdbl(dst, c); 2258 } 2259 } 2260 2261 // dst = c = a * b + c 2262 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 2263 Assembler::vfmadd231ss(c, a, b); 2264 if (dst != c) { 2265 movflt(dst, c); 2266 } 2267 } 2268 2269 // dst = c = a * b + c 2270 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 2271 Assembler::vfmadd231pd(c, a, b, vector_len); 2272 if (dst != c) { 2273 vmovdqu(dst, c); 2274 } 2275 } 2276 2277 // dst = c = a * b + c 2278 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 2279 Assembler::vfmadd231ps(c, a, b, vector_len); 2280 if (dst != c) { 2281 vmovdqu(dst, c); 2282 } 2283 } 2284 2285 // dst = c = a * b + c 2286 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 2287 Assembler::vfmadd231pd(c, a, b, vector_len); 2288 if (dst != c) { 2289 vmovdqu(dst, c); 2290 } 2291 } 2292 2293 // dst = c = a * b + c 2294 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 2295 Assembler::vfmadd231ps(c, a, b, vector_len); 2296 if (dst != c) { 2297 vmovdqu(dst, c); 2298 } 2299 } 2300 2301 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) { 2302 assert(rscratch != noreg || always_reachable(dst), "missing"); 2303 2304 if (reachable(dst)) { 2305 incrementl(as_Address(dst)); 2306 } else { 2307 lea(rscratch, dst); 2308 incrementl(Address(rscratch, 0)); 2309 } 2310 } 2311 2312 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) { 2313 incrementl(as_Address(dst, rscratch)); 2314 } 2315 2316 void MacroAssembler::incrementl(Register reg, int value) { 2317 if (value == min_jint) 
{addl(reg, value) ; return; } 2318 if (value < 0) { decrementl(reg, -value); return; } 2319 if (value == 0) { ; return; } 2320 if (value == 1 && UseIncDec) { incl(reg) ; return; } 2321 /* else */ { addl(reg, value) ; return; } 2322 } 2323 2324 void MacroAssembler::incrementl(Address dst, int value) { 2325 if (value == min_jint) {addl(dst, value) ; return; } 2326 if (value < 0) { decrementl(dst, -value); return; } 2327 if (value == 0) { ; return; } 2328 if (value == 1 && UseIncDec) { incl(dst) ; return; } 2329 /* else */ { addl(dst, value) ; return; } 2330 } 2331 2332 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) { 2333 assert(rscratch != noreg || always_reachable(dst), "missing"); 2334 2335 if (reachable(dst)) { 2336 jmp_literal(dst.target(), dst.rspec()); 2337 } else { 2338 lea(rscratch, dst); 2339 jmp(rscratch); 2340 } 2341 } 2342 2343 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) { 2344 assert(rscratch != noreg || always_reachable(dst), "missing"); 2345 2346 if (reachable(dst)) { 2347 InstructionMark im(this); 2348 relocate(dst.reloc()); 2349 const int short_size = 2; 2350 const int long_size = 6; 2351 int offs = (intptr_t)dst.target() - ((intptr_t)pc()); 2352 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) { 2353 // 0111 tttn #8-bit disp 2354 emit_int8(0x70 | cc); 2355 emit_int8((offs - short_size) & 0xFF); 2356 } else { 2357 // 0000 1111 1000 tttn #32-bit disp 2358 emit_int8(0x0F); 2359 emit_int8((unsigned char)(0x80 | cc)); 2360 emit_int32(offs - long_size); 2361 } 2362 } else { 2363 #ifdef ASSERT 2364 warning("reversing conditional branch"); 2365 #endif /* ASSERT */ 2366 Label skip; 2367 jccb(reverse[cc], skip); 2368 lea(rscratch, dst); 2369 Assembler::jmp(rscratch); 2370 bind(skip); 2371 } 2372 } 2373 2374 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) { 2375 assert(rscratch != noreg || always_reachable(src), "missing"); 2376 2377 if (reachable(src)) { 2378 Assembler::ldmxcsr(as_Address(src)); 2379 } else { 2380 lea(rscratch, src); 2381 Assembler::ldmxcsr(Address(rscratch, 0)); 2382 } 2383 } 2384 2385 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2386 int off; 2387 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 2388 off = offset(); 2389 movsbl(dst, src); // movsxb 2390 } else { 2391 off = load_unsigned_byte(dst, src); 2392 shll(dst, 24); 2393 sarl(dst, 24); 2394 } 2395 return off; 2396 } 2397 2398 // Note: load_signed_short used to be called load_signed_word. 2399 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler 2400 // manual, which means 16 bits, that usage is found nowhere in HotSpot code. 2401 // The term "word" in HotSpot means a 32- or 64-bit machine word. 2402 int MacroAssembler::load_signed_short(Register dst, Address src) { 2403 int off; 2404 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 2405 // This is dubious to me since it seems safe to do a signed 16 => 64 bit 2406 // version but this is what 64bit has always done. This seems to imply 2407 // that users are only using 32bits worth. 2408 off = offset(); 2409 movswl(dst, src); // movsxw 2410 } else { 2411 off = load_unsigned_short(dst, src); 2412 shll(dst, 16); 2413 sarl(dst, 16); 2414 } 2415 return off; 2416 } 2417 2418 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2419 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 2420 // and "3.9 Partial Register Penalties", p. 22). 
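  // On P6 and newer cores (and always on 64-bit), movzx both loads and
  // zero-extends in one instruction with no dependence on the old bits of
  // dst, so
  //   movzbl(dst, src);
  // is preferred over the pre-P6 idiom
  //   xorl(dst, dst); movb(dst, src);
  // which the else-branch below keeps for old hardware (and which cannot be
  // used when src's addressing expression uses dst, since the xorl would
  // clobber an address component).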
2421 int off; 2422 if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) { 2423 off = offset(); 2424 movzbl(dst, src); // movzxb 2425 } else { 2426 xorl(dst, dst); 2427 off = offset(); 2428 movb(dst, src); 2429 } 2430 return off; 2431 } 2432 2433 // Note: load_unsigned_short used to be called load_unsigned_word. 2434 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2435 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 2436 // and "3.9 Partial Register Penalties", p. 22). 2437 int off; 2438 if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) { 2439 off = offset(); 2440 movzwl(dst, src); // movzxw 2441 } else { 2442 xorl(dst, dst); 2443 off = offset(); 2444 movw(dst, src); 2445 } 2446 return off; 2447 } 2448 2449 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { 2450 switch (size_in_bytes) { 2451 #ifndef _LP64 2452 case 8: 2453 assert(dst2 != noreg, "second dest register required"); 2454 movl(dst, src); 2455 movl(dst2, src.plus_disp(BytesPerInt)); 2456 break; 2457 #else 2458 case 8: movq(dst, src); break; 2459 #endif 2460 case 4: movl(dst, src); break; 2461 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2462 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2463 default: ShouldNotReachHere(); 2464 } 2465 } 2466 2467 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { 2468 switch (size_in_bytes) { 2469 #ifndef _LP64 2470 case 8: 2471 assert(src2 != noreg, "second source register required"); 2472 movl(dst, src); 2473 movl(dst.plus_disp(BytesPerInt), src2); 2474 break; 2475 #else 2476 case 8: movq(dst, src); break; 2477 #endif 2478 case 4: movl(dst, src); break; 2479 case 2: movw(dst, src); break; 2480 case 1: movb(dst, src); break; 2481 default: ShouldNotReachHere(); 2482 } 2483 } 2484 2485 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) { 2486 assert(rscratch != noreg || always_reachable(dst), "missing"); 2487 2488 if (reachable(dst)) { 2489 movl(as_Address(dst), src); 2490 } else { 2491 lea(rscratch, dst); 2492 movl(Address(rscratch, 0), src); 2493 } 2494 } 2495 2496 void MacroAssembler::mov32(Register dst, AddressLiteral src) { 2497 if (reachable(src)) { 2498 movl(dst, as_Address(src)); 2499 } else { 2500 lea(dst, src); 2501 movl(dst, Address(dst, 0)); 2502 } 2503 } 2504 2505 // C++ bool manipulation 2506 2507 void MacroAssembler::movbool(Register dst, Address src) { 2508 if(sizeof(bool) == 1) 2509 movb(dst, src); 2510 else if(sizeof(bool) == 2) 2511 movw(dst, src); 2512 else if(sizeof(bool) == 4) 2513 movl(dst, src); 2514 else 2515 // unsupported 2516 ShouldNotReachHere(); 2517 } 2518 2519 void MacroAssembler::movbool(Address dst, bool boolconst) { 2520 if(sizeof(bool) == 1) 2521 movb(dst, (int) boolconst); 2522 else if(sizeof(bool) == 2) 2523 movw(dst, (int) boolconst); 2524 else if(sizeof(bool) == 4) 2525 movl(dst, (int) boolconst); 2526 else 2527 // unsupported 2528 ShouldNotReachHere(); 2529 } 2530 2531 void MacroAssembler::movbool(Address dst, Register src) { 2532 if(sizeof(bool) == 1) 2533 movb(dst, src); 2534 else if(sizeof(bool) == 2) 2535 movw(dst, src); 2536 else if(sizeof(bool) == 4) 2537 movl(dst, src); 2538 else 2539 // unsupported 2540 ShouldNotReachHere(); 2541 } 2542 2543 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) { 2544 assert(rscratch != noreg || 
always_reachable(src), "missing"); 2545 2546 if (reachable(src)) { 2547 movdl(dst, as_Address(src)); 2548 } else { 2549 lea(rscratch, src); 2550 movdl(dst, Address(rscratch, 0)); 2551 } 2552 } 2553 2554 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) { 2555 assert(rscratch != noreg || always_reachable(src), "missing"); 2556 2557 if (reachable(src)) { 2558 movq(dst, as_Address(src)); 2559 } else { 2560 lea(rscratch, src); 2561 movq(dst, Address(rscratch, 0)); 2562 } 2563 } 2564 2565 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) { 2566 assert(rscratch != noreg || always_reachable(src), "missing"); 2567 2568 if (reachable(src)) { 2569 if (UseXmmLoadAndClearUpper) { 2570 movsd (dst, as_Address(src)); 2571 } else { 2572 movlpd(dst, as_Address(src)); 2573 } 2574 } else { 2575 lea(rscratch, src); 2576 if (UseXmmLoadAndClearUpper) { 2577 movsd (dst, Address(rscratch, 0)); 2578 } else { 2579 movlpd(dst, Address(rscratch, 0)); 2580 } 2581 } 2582 } 2583 2584 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) { 2585 assert(rscratch != noreg || always_reachable(src), "missing"); 2586 2587 if (reachable(src)) { 2588 movss(dst, as_Address(src)); 2589 } else { 2590 lea(rscratch, src); 2591 movss(dst, Address(rscratch, 0)); 2592 } 2593 } 2594 2595 void MacroAssembler::movptr(Register dst, Register src) { 2596 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2597 } 2598 2599 void MacroAssembler::movptr(Register dst, Address src) { 2600 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2601 } 2602 2603 // src should NEVER be a real pointer. Use AddressLiteral for true pointers 2604 void MacroAssembler::movptr(Register dst, intptr_t src) { 2605 #ifdef _LP64 2606 if (is_uimm32(src)) { 2607 movl(dst, checked_cast<uint32_t>(src)); 2608 } else if (is_simm32(src)) { 2609 movq(dst, checked_cast<int32_t>(src)); 2610 } else { 2611 mov64(dst, src); 2612 } 2613 #else 2614 movl(dst, src); 2615 #endif 2616 } 2617 2618 void MacroAssembler::movptr(Address dst, Register src) { 2619 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2620 } 2621 2622 void MacroAssembler::movptr(Address dst, int32_t src) { 2623 LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); 2624 } 2625 2626 void MacroAssembler::movdqu(Address dst, XMMRegister src) { 2627 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2628 Assembler::movdqu(dst, src); 2629 } 2630 2631 void MacroAssembler::movdqu(XMMRegister dst, Address src) { 2632 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2633 Assembler::movdqu(dst, src); 2634 } 2635 2636 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) { 2637 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2638 Assembler::movdqu(dst, src); 2639 } 2640 2641 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2642 assert(rscratch != noreg || always_reachable(src), "missing"); 2643 2644 if (reachable(src)) { 2645 movdqu(dst, as_Address(src)); 2646 } else { 2647 lea(rscratch, src); 2648 movdqu(dst, Address(rscratch, 0)); 2649 } 2650 } 2651 2652 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) { 2653 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2654 Assembler::vmovdqu(dst, src); 2655 } 2656 2657 void MacroAssembler::vmovdqu(XMMRegister dst, 
Address src) { 2658 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2659 Assembler::vmovdqu(dst, src); 2660 } 2661 2662 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2663 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2664 Assembler::vmovdqu(dst, src); 2665 } 2666 2667 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2668 assert(rscratch != noreg || always_reachable(src), "missing"); 2669 2670 if (reachable(src)) { 2671 vmovdqu(dst, as_Address(src)); 2672 } 2673 else { 2674 lea(rscratch, src); 2675 vmovdqu(dst, Address(rscratch, 0)); 2676 } 2677 } 2678 2679 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2680 assert(rscratch != noreg || always_reachable(src), "missing"); 2681 2682 if (vector_len == AVX_512bit) { 2683 evmovdquq(dst, src, AVX_512bit, rscratch); 2684 } else if (vector_len == AVX_256bit) { 2685 vmovdqu(dst, src, rscratch); 2686 } else { 2687 movdqu(dst, src, rscratch); 2688 } 2689 } 2690 2691 void MacroAssembler::kmov(KRegister dst, Address src) { 2692 if (VM_Version::supports_avx512bw()) { 2693 kmovql(dst, src); 2694 } else { 2695 assert(VM_Version::supports_evex(), ""); 2696 kmovwl(dst, src); 2697 } 2698 } 2699 2700 void MacroAssembler::kmov(Address dst, KRegister src) { 2701 if (VM_Version::supports_avx512bw()) { 2702 kmovql(dst, src); 2703 } else { 2704 assert(VM_Version::supports_evex(), ""); 2705 kmovwl(dst, src); 2706 } 2707 } 2708 2709 void MacroAssembler::kmov(KRegister dst, KRegister src) { 2710 if (VM_Version::supports_avx512bw()) { 2711 kmovql(dst, src); 2712 } else { 2713 assert(VM_Version::supports_evex(), ""); 2714 kmovwl(dst, src); 2715 } 2716 } 2717 2718 void MacroAssembler::kmov(Register dst, KRegister src) { 2719 if (VM_Version::supports_avx512bw()) { 2720 kmovql(dst, src); 2721 } else { 2722 assert(VM_Version::supports_evex(), ""); 2723 kmovwl(dst, src); 2724 } 2725 } 2726 2727 void MacroAssembler::kmov(KRegister dst, Register src) { 2728 if (VM_Version::supports_avx512bw()) { 2729 kmovql(dst, src); 2730 } else { 2731 assert(VM_Version::supports_evex(), ""); 2732 kmovwl(dst, src); 2733 } 2734 } 2735 2736 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) { 2737 assert(rscratch != noreg || always_reachable(src), "missing"); 2738 2739 if (reachable(src)) { 2740 kmovql(dst, as_Address(src)); 2741 } else { 2742 lea(rscratch, src); 2743 kmovql(dst, Address(rscratch, 0)); 2744 } 2745 } 2746 2747 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) { 2748 assert(rscratch != noreg || always_reachable(src), "missing"); 2749 2750 if (reachable(src)) { 2751 kmovwl(dst, as_Address(src)); 2752 } else { 2753 lea(rscratch, src); 2754 kmovwl(dst, Address(rscratch, 0)); 2755 } 2756 } 2757 2758 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2759 int vector_len, Register rscratch) { 2760 assert(rscratch != noreg || always_reachable(src), "missing"); 2761 2762 if (reachable(src)) { 2763 Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len); 2764 } else { 2765 lea(rscratch, src); 2766 Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len); 2767 } 2768 } 2769 2770 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2771 int vector_len, Register rscratch) { 2772 
assert(rscratch != noreg || always_reachable(src), "missing"); 2773 2774 if (reachable(src)) { 2775 Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len); 2776 } else { 2777 lea(rscratch, src); 2778 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len); 2779 } 2780 } 2781 2782 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2783 assert(rscratch != noreg || always_reachable(src), "missing"); 2784 2785 if (reachable(src)) { 2786 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len); 2787 } else { 2788 lea(rscratch, src); 2789 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len); 2790 } 2791 } 2792 2793 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2794 assert(rscratch != noreg || always_reachable(src), "missing"); 2795 2796 if (reachable(src)) { 2797 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len); 2798 } else { 2799 lea(rscratch, src); 2800 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len); 2801 } 2802 } 2803 2804 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2805 assert(rscratch != noreg || always_reachable(src), "missing"); 2806 2807 if (reachable(src)) { 2808 Assembler::evmovdquq(dst, as_Address(src), vector_len); 2809 } else { 2810 lea(rscratch, src); 2811 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len); 2812 } 2813 } 2814 2815 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) { 2816 assert(rscratch != noreg || always_reachable(src), "missing"); 2817 2818 if (reachable(src)) { 2819 Assembler::movdqa(dst, as_Address(src)); 2820 } else { 2821 lea(rscratch, src); 2822 Assembler::movdqa(dst, Address(rscratch, 0)); 2823 } 2824 } 2825 2826 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2827 assert(rscratch != noreg || always_reachable(src), "missing"); 2828 2829 if (reachable(src)) { 2830 Assembler::movsd(dst, as_Address(src)); 2831 } else { 2832 lea(rscratch, src); 2833 Assembler::movsd(dst, Address(rscratch, 0)); 2834 } 2835 } 2836 2837 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2838 assert(rscratch != noreg || always_reachable(src), "missing"); 2839 2840 if (reachable(src)) { 2841 Assembler::movss(dst, as_Address(src)); 2842 } else { 2843 lea(rscratch, src); 2844 Assembler::movss(dst, Address(rscratch, 0)); 2845 } 2846 } 2847 2848 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) { 2849 assert(rscratch != noreg || always_reachable(src), "missing"); 2850 2851 if (reachable(src)) { 2852 Assembler::movddup(dst, as_Address(src)); 2853 } else { 2854 lea(rscratch, src); 2855 Assembler::movddup(dst, Address(rscratch, 0)); 2856 } 2857 } 2858 2859 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2860 assert(rscratch != noreg || always_reachable(src), "missing"); 2861 2862 if (reachable(src)) { 2863 Assembler::vmovddup(dst, as_Address(src), vector_len); 2864 } else { 2865 lea(rscratch, src); 2866 Assembler::vmovddup(dst, Address(rscratch, 0), vector_len); 2867 } 2868 } 2869 2870 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2871 assert(rscratch != noreg || always_reachable(src), "missing"); 2872 2873 if 
(reachable(src)) { 2874 Assembler::mulsd(dst, as_Address(src)); 2875 } else { 2876 lea(rscratch, src); 2877 Assembler::mulsd(dst, Address(rscratch, 0)); 2878 } 2879 } 2880 2881 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2882 assert(rscratch != noreg || always_reachable(src), "missing"); 2883 2884 if (reachable(src)) { 2885 Assembler::mulss(dst, as_Address(src)); 2886 } else { 2887 lea(rscratch, src); 2888 Assembler::mulss(dst, Address(rscratch, 0)); 2889 } 2890 } 2891 2892 void MacroAssembler::null_check(Register reg, int offset) { 2893 if (needs_explicit_null_check(offset)) { 2894 // provoke OS null exception if reg is null by 2895 // accessing M[reg] w/o changing any (non-CC) registers 2896 // NOTE: cmpl is plenty here to provoke a segv 2897 cmpptr(rax, Address(reg, 0)); 2898 // Note: should probably use testl(rax, Address(reg, 0)); 2899 // may be shorter code (however, this version of 2900 // testl needs to be implemented first) 2901 } else { 2902 // nothing to do, (later) access of M[reg + offset] 2903 // will provoke OS null exception if reg is null 2904 } 2905 } 2906 2907 void MacroAssembler::os_breakpoint() { 2908 // instead of directly emitting a breakpoint, call os:breakpoint for better debugability 2909 // (e.g., MSVC can't call ps() otherwise) 2910 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); 2911 } 2912 2913 void MacroAssembler::unimplemented(const char* what) { 2914 const char* buf = nullptr; 2915 { 2916 ResourceMark rm; 2917 stringStream ss; 2918 ss.print("unimplemented: %s", what); 2919 buf = code_string(ss.as_string()); 2920 } 2921 stop(buf); 2922 } 2923 2924 #ifdef _LP64 2925 #define XSTATE_BV 0x200 2926 #endif 2927 2928 void MacroAssembler::pop_CPU_state() { 2929 pop_FPU_state(); 2930 pop_IU_state(); 2931 } 2932 2933 void MacroAssembler::pop_FPU_state() { 2934 #ifndef _LP64 2935 frstor(Address(rsp, 0)); 2936 #else 2937 fxrstor(Address(rsp, 0)); 2938 #endif 2939 addptr(rsp, FPUStateSizeInWords * wordSize); 2940 } 2941 2942 void MacroAssembler::pop_IU_state() { 2943 popa(); 2944 LP64_ONLY(addq(rsp, 8)); 2945 popf(); 2946 } 2947 2948 // Save Integer and Float state 2949 // Warning: Stack must be 16 byte aligned (64bit) 2950 void MacroAssembler::push_CPU_state() { 2951 push_IU_state(); 2952 push_FPU_state(); 2953 } 2954 2955 void MacroAssembler::push_FPU_state() { 2956 subptr(rsp, FPUStateSizeInWords * wordSize); 2957 #ifndef _LP64 2958 fnsave(Address(rsp, 0)); 2959 fwait(); 2960 #else 2961 fxsave(Address(rsp, 0)); 2962 #endif // LP64 2963 } 2964 2965 void MacroAssembler::push_IU_state() { 2966 // Push flags first because pusha kills them 2967 pushf(); 2968 // Make sure rsp stays 16-byte aligned 2969 LP64_ONLY(subq(rsp, 8)); 2970 pusha(); 2971 } 2972 2973 void MacroAssembler::push_cont_fastpath() { 2974 if (!Continuations::enabled()) return; 2975 2976 #ifndef _LP64 2977 Register rthread = rax; 2978 Register rrealsp = rbx; 2979 push(rthread); 2980 push(rrealsp); 2981 2982 get_thread(rthread); 2983 2984 // The code below wants the original RSP. 2985 // Move it back after the pushes above. 
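  // (The two pushes above moved rsp down by 2*wordSize; adding that back in
  // leaves rrealsp holding the stack pointer the caller saw.)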
2986 movptr(rrealsp, rsp); 2987 addptr(rrealsp, 2*wordSize); 2988 #else 2989 Register rthread = r15_thread; 2990 Register rrealsp = rsp; 2991 #endif 2992 2993 Label done; 2994 cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset())); 2995 jccb(Assembler::belowEqual, done); 2996 movptr(Address(rthread, JavaThread::cont_fastpath_offset()), rrealsp); 2997 bind(done); 2998 2999 #ifndef _LP64 3000 pop(rrealsp); 3001 pop(rthread); 3002 #endif 3003 } 3004 3005 void MacroAssembler::pop_cont_fastpath() { 3006 if (!Continuations::enabled()) return; 3007 3008 #ifndef _LP64 3009 Register rthread = rax; 3010 Register rrealsp = rbx; 3011 push(rthread); 3012 push(rrealsp); 3013 3014 get_thread(rthread); 3015 3016 // The code below wants the original RSP. 3017 // Move it back after the pushes above. 3018 movptr(rrealsp, rsp); 3019 addptr(rrealsp, 2*wordSize); 3020 #else 3021 Register rthread = r15_thread; 3022 Register rrealsp = rsp; 3023 #endif 3024 3025 Label done; 3026 cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset())); 3027 jccb(Assembler::below, done); 3028 movptr(Address(rthread, JavaThread::cont_fastpath_offset()), 0); 3029 bind(done); 3030 3031 #ifndef _LP64 3032 pop(rrealsp); 3033 pop(rthread); 3034 #endif 3035 } 3036 3037 void MacroAssembler::inc_held_monitor_count() { 3038 #ifndef _LP64 3039 Register thread = rax; 3040 push(thread); 3041 get_thread(thread); 3042 incrementl(Address(thread, JavaThread::held_monitor_count_offset())); 3043 pop(thread); 3044 #else // LP64 3045 incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 3046 #endif 3047 } 3048 3049 void MacroAssembler::dec_held_monitor_count() { 3050 #ifndef _LP64 3051 Register thread = rax; 3052 push(thread); 3053 get_thread(thread); 3054 decrementl(Address(thread, JavaThread::held_monitor_count_offset())); 3055 pop(thread); 3056 #else // LP64 3057 decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 3058 #endif 3059 } 3060 3061 #ifdef ASSERT 3062 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) { 3063 #ifdef _LP64 3064 Label no_cont; 3065 movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset())); 3066 testl(cont, cont); 3067 jcc(Assembler::zero, no_cont); 3068 stop(name); 3069 bind(no_cont); 3070 #else 3071 Unimplemented(); 3072 #endif 3073 } 3074 #endif 3075 3076 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) { // determine java_thread register 3077 if (!java_thread->is_valid()) { 3078 java_thread = rdi; 3079 get_thread(java_thread); 3080 } 3081 // we must set sp to zero to clear frame 3082 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD); 3083 // must clear fp, so that compiled frames are not confused; it is 3084 // possible that we need it only for debugging 3085 if (clear_fp) { 3086 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); 3087 } 3088 // Always clear the pc because it could have been set by make_walkable() 3089 movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); 3090 vzeroupper(); 3091 } 3092 3093 void MacroAssembler::restore_rax(Register tmp) { 3094 if (tmp == noreg) pop(rax); 3095 else if (tmp != rax) mov(rax, tmp); 3096 } 3097 3098 void MacroAssembler::round_to(Register reg, int modulus) { 3099 addptr(reg, modulus - 1); 3100 andptr(reg, -modulus); 3101 } 3102 3103 void MacroAssembler::save_rax(Register tmp) { 3104 if (tmp == noreg) push(rax); 3105 else if (tmp != rax) mov(tmp, rax); 3106 } 3107 3108 void 
MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod) { 3109 if (at_return) { 3110 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore, 3111 // we may safely use rsp instead to perform the stack watermark check. 3112 cmpptr(in_nmethod ? rsp : rbp, Address(thread_reg, JavaThread::polling_word_offset())); 3113 jcc(Assembler::above, slow_path); 3114 return; 3115 } 3116 testb(Address(thread_reg, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit()); 3117 jcc(Assembler::notZero, slow_path); // handshake bit set implies poll 3118 } 3119 3120 // Calls to C land 3121 // 3122 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded 3123 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp 3124 // has to be reset to 0. This is required to allow proper stack traversal. 3125 void MacroAssembler::set_last_Java_frame(Register java_thread, 3126 Register last_java_sp, 3127 Register last_java_fp, 3128 address last_java_pc, 3129 Register rscratch) { 3130 vzeroupper(); 3131 // determine java_thread register 3132 if (!java_thread->is_valid()) { 3133 java_thread = rdi; 3134 get_thread(java_thread); 3135 } 3136 // determine last_java_sp register 3137 if (!last_java_sp->is_valid()) { 3138 last_java_sp = rsp; 3139 } 3140 // last_java_fp is optional 3141 if (last_java_fp->is_valid()) { 3142 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp); 3143 } 3144 // last_java_pc is optional 3145 if (last_java_pc != nullptr) { 3146 Address java_pc(java_thread, 3147 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); 3148 lea(java_pc, InternalAddress(last_java_pc), rscratch); 3149 } 3150 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp); 3151 } 3152 3153 void MacroAssembler::shlptr(Register dst, int imm8) { 3154 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8)); 3155 } 3156 3157 void MacroAssembler::shrptr(Register dst, int imm8) { 3158 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8)); 3159 } 3160 3161 void MacroAssembler::sign_extend_byte(Register reg) { 3162 if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) { 3163 movsbl(reg, reg); // movsxb 3164 } else { 3165 shll(reg, 24); 3166 sarl(reg, 24); 3167 } 3168 } 3169 3170 void MacroAssembler::sign_extend_short(Register reg) { 3171 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 3172 movswl(reg, reg); // movsxw 3173 } else { 3174 shll(reg, 16); 3175 sarl(reg, 16); 3176 } 3177 } 3178 3179 void MacroAssembler::testl(Address dst, int32_t imm32) { 3180 if (imm32 >= 0 && is8bit(imm32)) { 3181 testb(dst, imm32); 3182 } else { 3183 Assembler::testl(dst, imm32); 3184 } 3185 } 3186 3187 void MacroAssembler::testl(Register dst, int32_t imm32) { 3188 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) { 3189 testb(dst, imm32); 3190 } else { 3191 Assembler::testl(dst, imm32); 3192 } 3193 } 3194 3195 void MacroAssembler::testl(Register dst, AddressLiteral src) { 3196 assert(always_reachable(src), "Address should be reachable"); 3197 testl(dst, as_Address(src)); 3198 } 3199 3200 #ifdef _LP64 3201 3202 void MacroAssembler::testq(Address dst, int32_t imm32) { 3203 if (imm32 >= 0) { 3204 testl(dst, imm32); 3205 } else { 3206 Assembler::testq(dst, imm32); 3207 } 3208 } 3209 3210 void MacroAssembler::testq(Register dst, int32_t imm32) { 3211 if (imm32 >= 0) { 3212 testl(dst, imm32); 3213 } else { 3214 
Assembler::testq(dst, imm32); 3215 } 3216 } 3217 3218 #endif 3219 3220 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) { 3221 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3222 Assembler::pcmpeqb(dst, src); 3223 } 3224 3225 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) { 3226 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3227 Assembler::pcmpeqw(dst, src); 3228 } 3229 3230 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) { 3231 assert((dst->encoding() < 16),"XMM register should be 0-15"); 3232 Assembler::pcmpestri(dst, src, imm8); 3233 } 3234 3235 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { 3236 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3237 Assembler::pcmpestri(dst, src, imm8); 3238 } 3239 3240 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) { 3241 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3242 Assembler::pmovzxbw(dst, src); 3243 } 3244 3245 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) { 3246 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3247 Assembler::pmovzxbw(dst, src); 3248 } 3249 3250 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) { 3251 assert((src->encoding() < 16),"XMM register should be 0-15"); 3252 Assembler::pmovmskb(dst, src); 3253 } 3254 3255 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) { 3256 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3257 Assembler::ptest(dst, src); 3258 } 3259 3260 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3261 assert(rscratch != noreg || always_reachable(src), "missing"); 3262 3263 if (reachable(src)) { 3264 Assembler::sqrtss(dst, as_Address(src)); 3265 } else { 3266 lea(rscratch, src); 3267 Assembler::sqrtss(dst, Address(rscratch, 0)); 3268 } 3269 } 3270 3271 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3272 assert(rscratch != noreg || always_reachable(src), "missing"); 3273 3274 if (reachable(src)) { 3275 Assembler::subsd(dst, as_Address(src)); 3276 } else { 3277 lea(rscratch, src); 3278 Assembler::subsd(dst, Address(rscratch, 0)); 3279 } 3280 } 3281 3282 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) { 3283 assert(rscratch != noreg || always_reachable(src), "missing"); 3284 3285 if (reachable(src)) { 3286 Assembler::roundsd(dst, as_Address(src), rmode); 3287 } else { 3288 lea(rscratch, src); 3289 Assembler::roundsd(dst, Address(rscratch, 0), rmode); 3290 } 3291 } 3292 3293 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3294 assert(rscratch != noreg || always_reachable(src), "missing"); 3295 3296 if (reachable(src)) { 3297 Assembler::subss(dst, as_Address(src)); 3298 } else { 3299 lea(rscratch, src); 3300 Assembler::subss(dst, Address(rscratch, 0)); 3301 } 3302 } 3303 3304 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3305 assert(rscratch != noreg || always_reachable(src), "missing"); 3306 3307 if (reachable(src)) { 3308 Assembler::ucomisd(dst, as_Address(src)); 3309 } else { 3310 lea(rscratch, src); 3311 
Assembler::ucomisd(dst, Address(rscratch, 0)); 3312 } 3313 } 3314 3315 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3316 assert(rscratch != noreg || always_reachable(src), "missing"); 3317 3318 if (reachable(src)) { 3319 Assembler::ucomiss(dst, as_Address(src)); 3320 } else { 3321 lea(rscratch, src); 3322 Assembler::ucomiss(dst, Address(rscratch, 0)); 3323 } 3324 } 3325 3326 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3327 assert(rscratch != noreg || always_reachable(src), "missing"); 3328 3329 // Used in sign-bit flipping with aligned address. 3330 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 3331 if (reachable(src)) { 3332 Assembler::xorpd(dst, as_Address(src)); 3333 } else { 3334 lea(rscratch, src); 3335 Assembler::xorpd(dst, Address(rscratch, 0)); 3336 } 3337 } 3338 3339 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) { 3340 if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) { 3341 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 3342 } 3343 else { 3344 Assembler::xorpd(dst, src); 3345 } 3346 } 3347 3348 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) { 3349 if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) { 3350 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 3351 } else { 3352 Assembler::xorps(dst, src); 3353 } 3354 } 3355 3356 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) { 3357 assert(rscratch != noreg || always_reachable(src), "missing"); 3358 3359 // Used in sign-bit flipping with aligned address. 3360 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 3361 if (reachable(src)) { 3362 Assembler::xorps(dst, as_Address(src)); 3363 } else { 3364 lea(rscratch, src); 3365 Assembler::xorps(dst, Address(rscratch, 0)); 3366 } 3367 } 3368 3369 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) { 3370 assert(rscratch != noreg || always_reachable(src), "missing"); 3371 3372 // Used in sign-bit flipping with aligned address. 
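  // (Legacy-SSE pshufb with a memory operand requires 16-byte alignment and
  // faults otherwise; the VEX-encoded AVX form tolerates unaligned operands,
  // which is what the alignment assert below encodes.)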
3373 bool aligned_adr = (((intptr_t)src.target() & 15) == 0); 3374 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes"); 3375 if (reachable(src)) { 3376 Assembler::pshufb(dst, as_Address(src)); 3377 } else { 3378 lea(rscratch, src); 3379 Assembler::pshufb(dst, Address(rscratch, 0)); 3380 } 3381 } 3382 3383 // AVX 3-operands instructions 3384 3385 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3386 assert(rscratch != noreg || always_reachable(src), "missing"); 3387 3388 if (reachable(src)) { 3389 vaddsd(dst, nds, as_Address(src)); 3390 } else { 3391 lea(rscratch, src); 3392 vaddsd(dst, nds, Address(rscratch, 0)); 3393 } 3394 } 3395 3396 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3397 assert(rscratch != noreg || always_reachable(src), "missing"); 3398 3399 if (reachable(src)) { 3400 vaddss(dst, nds, as_Address(src)); 3401 } else { 3402 lea(rscratch, src); 3403 vaddss(dst, nds, Address(rscratch, 0)); 3404 } 3405 } 3406 3407 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3408 assert(UseAVX > 0, "requires some form of AVX"); 3409 assert(rscratch != noreg || always_reachable(src), "missing"); 3410 3411 if (reachable(src)) { 3412 Assembler::vpaddb(dst, nds, as_Address(src), vector_len); 3413 } else { 3414 lea(rscratch, src); 3415 Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len); 3416 } 3417 } 3418 3419 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3420 assert(UseAVX > 0, "requires some form of AVX"); 3421 assert(rscratch != noreg || always_reachable(src), "missing"); 3422 3423 if (reachable(src)) { 3424 Assembler::vpaddd(dst, nds, as_Address(src), vector_len); 3425 } else { 3426 lea(rscratch, src); 3427 Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len); 3428 } 3429 } 3430 3431 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 3432 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3433 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 3434 3435 vandps(dst, nds, negate_field, vector_len, rscratch); 3436 } 3437 3438 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 3439 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3440 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 3441 3442 vandpd(dst, nds, negate_field, vector_len, rscratch); 3443 } 3444 3445 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3446 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3447 Assembler::vpaddb(dst, nds, src, vector_len); 3448 } 3449 3450 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3451 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3452 Assembler::vpaddb(dst, nds, src, vector_len); 3453 } 3454 3455 void 
MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3456 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3457 Assembler::vpaddw(dst, nds, src, vector_len); 3458 } 3459 3460 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3461 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3462 Assembler::vpaddw(dst, nds, src, vector_len); 3463 } 3464 3465 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3466 assert(rscratch != noreg || always_reachable(src), "missing"); 3467 3468 if (reachable(src)) { 3469 Assembler::vpand(dst, nds, as_Address(src), vector_len); 3470 } else { 3471 lea(rscratch, src); 3472 Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len); 3473 } 3474 } 3475 3476 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3477 assert(rscratch != noreg || always_reachable(src), "missing"); 3478 3479 if (reachable(src)) { 3480 Assembler::vpbroadcastd(dst, as_Address(src), vector_len); 3481 } else { 3482 lea(rscratch, src); 3483 Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len); 3484 } 3485 } 3486 3487 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3488 assert(rscratch != noreg || always_reachable(src), "missing"); 3489 3490 if (reachable(src)) { 3491 Assembler::vpbroadcastq(dst, as_Address(src), vector_len); 3492 } else { 3493 lea(rscratch, src); 3494 Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len); 3495 } 3496 } 3497 3498 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3499 assert(rscratch != noreg || always_reachable(src), "missing"); 3500 3501 if (reachable(src)) { 3502 Assembler::vbroadcastsd(dst, as_Address(src), vector_len); 3503 } else { 3504 lea(rscratch, src); 3505 Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len); 3506 } 3507 } 3508 3509 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3510 assert(rscratch != noreg || always_reachable(src), "missing"); 3511 3512 if (reachable(src)) { 3513 Assembler::vbroadcastss(dst, as_Address(src), vector_len); 3514 } else { 3515 lea(rscratch, src); 3516 Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len); 3517 } 3518 } 3519 3520 // Vector float blend 3521 // vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg) 3522 void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) { 3523 // WARN: Allow dst == (src1|src2), mask == scratch 3524 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1; 3525 bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst; 3526 bool dst_available = dst != mask && (dst != src1 || dst != src2); 3527 if (blend_emulation && scratch_available && dst_available) { 3528 if (compute_mask) { 3529 vpsrad(scratch, mask, 32, vector_len); 3530 mask = scratch; 3531 } 3532 if (dst == src1) { 3533 vpandn(dst, mask, src1, vector_len); // if mask == 0, src1 3534 vpand (scratch, mask, src2, 
// vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
  // WARN: Allow dst == (src1|src2), mask == scratch
  bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1;
  bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask);
  bool dst_available = dst != mask && (dst != src1 || dst != src2);
  if (blend_emulation && scratch_available && dst_available) {
    if (compute_mask) {
      vpxor(scratch, scratch, scratch, vector_len);
      vpcmpgtq(scratch, scratch, mask, vector_len);
      mask = scratch;
    }
    if (dst == src1) {
      vpandn(dst,     mask, src1, vector_len); // if mask == 0, src1
      vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
    } else {
      vpand (dst,     mask, src2, vector_len); // if mask == 1, src2
      vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
    }
    vpor(dst, dst, scratch, vector_len);
  } else {
    Assembler::vblendvpd(dst, src1, src2, mask, vector_len);
  }
}

void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpcmpeqb(dst, nds, src, vector_len);
}

void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpcmpeqb(dst, src1, src2, vector_len);
}

void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpcmpeqw(dst, nds, src, vector_len);
}

void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpcmpeqw(dst, nds, src, vector_len);
}

void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len);
  }
}
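// Illustrative note (not VM code): the AddressLiteral overloads in this file all
// share one shape. If the literal is RIP-reachable it is used as a direct memory
// operand; otherwise its address is materialized into a scratch register first:
//
//   if (reachable(src)) {
//     op(dst, as_Address(src));       // RIP-relative operand
//   } else {
//     lea(rscratch, src);             // 64-bit address into the scratch register
//     op(dst, Address(rscratch, 0));  // indirect through the scratch register
//   }
//
// The leading assert enforces that a scratch register was supplied whenever the
// literal might be out of RIP-relative range.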
void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
                             int comparison, bool is_signed, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
  }
}

void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
                             int comparison, bool is_signed, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
  }
}

void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
                             int comparison, bool is_signed, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
  }
}

void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
                             int comparison, bool is_signed, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
  }
}

void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) {
  if (width == Assembler::Q) {
    Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len);
  } else {
    Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len);
  }
}

void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) {
  int eq_cond_enc = 0x29;
  int gt_cond_enc = 0x37;
  if (width != Assembler::Q) {
    eq_cond_enc = 0x74 + width;
    gt_cond_enc = 0x64 + width;
  }
  switch (cond) {
  case eq:
    vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
    break;
  case neq:
    vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
    vallones(xtmp, vector_len);
    vpxor(dst, xtmp, dst, vector_len);
    break;
  case le:
    vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
    vallones(xtmp, vector_len);
    vpxor(dst, xtmp, dst, vector_len);
    break;
  case nlt:
    vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
    vallones(xtmp, vector_len);
    vpxor(dst, xtmp, dst, vector_len);
    break;
  case lt:
    vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
    break;
  case nle:
    vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
    break;
  default:
    assert(false, "Should not reach here");
  }
}
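// Illustrative sketch (not VM code): only equal and greater-than have direct
// packed-compare encodings here, so vpcmpCCW derives the other predicates:
//   neq = ~(a == b)        le  = ~(a > b)        nlt = ~(b > a)
//   lt  =  (b > a)         nle =  (a > b)
// The complement is formed by XORing the compare result with an all-ones vector
// (vallones), which works because packed compares produce all-ones/all-zeroes
// per lane.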
void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
  assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpmovzxbw(dst, src, vector_len);
}

void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) {
  assert((src->encoding() < 16), "XMM register should be 0-15");
  Assembler::vpmovmskb(dst, src, vector_len);
}

void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpmullw(dst, nds, src, vector_len);
}

void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpmullw(dst, nds, src, vector_len);
}

void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert((UseAVX > 0), "AVX support is needed");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpmulld(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubb(dst, nds, src, vector_len);
}

void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubb(dst, nds, src, vector_len);
}

void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubw(dst, nds, src, vector_len);
}

void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubw(dst, nds, src, vector_len);
}

void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsraw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsraw(dst, nds, shift, vector_len);
}

void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(UseAVX > 2, "");
  if (!VM_Version::supports_avx512vl() && vector_len < 2) {
    vector_len = 2;
  }
  Assembler::evpsraq(dst, nds, shift, vector_len);
}
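// Illustrative note (not VM code): without AVX512VL, EVEX-encoded instructions
// can only operate at 512-bit width, so the evpsraq variants promote narrower
// requests, conceptually:
//   if (!VM_Version::supports_avx512vl() && vector_len < Assembler::AVX_512bit) {
//     vector_len = Assembler::AVX_512bit;  // upper lanes treated as don't-care
//   }
// This is safe only because callers ignore lanes beyond the width they asked for.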
void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(UseAVX > 2, "");
  if (!VM_Version::supports_avx512vl() && vector_len < 2) {
    vector_len = 2;
  }
  Assembler::evpsraq(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsrlw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsrlw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsllw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsllw(dst, nds, shift, vector_len);
}

void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) {
  assert((dst->encoding() < 16 && src->encoding() < 16), "XMM register should be 0-15");
  Assembler::vptest(dst, src);
}

void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::punpcklbw(dst, src);
}

void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::pshufd(dst, src, mode);
}

void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::pshuflw(dst, src, mode);
}

void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vandpd(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    vandpd(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vandps(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    vandps(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src,
                            bool merge, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
  }
}
void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vdivsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vdivsd(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vdivss(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vdivss(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vmulsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vmulsd(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vmulss(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vmulss(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vsubsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vsubsd(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vsubss(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vsubss(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()), "XMM register should be 0-15");
  assert(rscratch != noreg || always_reachable(src), "missing");

  vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch);
}

void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()), "XMM register should be 0-15");
  assert(rscratch != noreg || always_reachable(src), "missing");

  vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch);
}
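// Illustrative sketch (not VM code): vnegatess/vnegatesd flip the sign by XORing
// with a constant loaded from 'src', conceptually
//   float  negf(float x)  { return from_bits(bits(x) ^ 0x80000000u); }
//   double negd(double x) { return from_bits(bits(x) ^ 0x8000000000000000ull); }
// assuming 'src' points at the matching sign-mask constant (an assumption here;
// the callers supply it). Only the 128-bit form is needed since the scalar
// result lives in the low lane.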
void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vxorpd(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    vxorpd(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vxorps(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    vxorps(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (UseAVX > 1 || (vector_len < 1)) {
    if (reachable(src)) {
      Assembler::vpxor(dst, nds, as_Address(src), vector_len);
    } else {
      lea(rscratch, src);
      Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len);
    }
  } else {
    MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch);
  }
}

void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpermd(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::clear_jobject_tag(Register possibly_non_local) {
  const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask);
  STATIC_ASSERT(inverted_mask == -4); // otherwise check this code
  // The inverted mask is sign-extended
  andptr(possibly_non_local, inverted_mask);
}

void MacroAssembler::resolve_jobject(Register value,
                                     Register thread,
                                     Register tmp) {
  assert_different_registers(value, thread, tmp);
  Label done, tagged, weak_tagged;

  testptr(value, value);
  jcc(Assembler::zero, done);            // Use null as-is.
  testptr(value, JNIHandles::tag_mask);  // Test for tag.
  jcc(Assembler::notZero, tagged);

  // Resolve local handle
  access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp, thread);
  verify_oop(value);
  jmp(done);

  bind(tagged);
  testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag.
  jcc(Assembler::notZero, weak_tagged);

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread);
  verify_oop(value);
  jmp(done);

  bind(weak_tagged);
  // Resolve jweak.
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp, thread);
  verify_oop(value);

  bind(done);
}
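// Illustrative C-ish sketch (not VM code) of the handle scheme resolved above,
// assuming the two low bits hold the tag (tag_mask == 3, which the STATIC_ASSERT
// in clear_jobject_tag implies):
//   if (handle == nullptr)                  -> nullptr
//   else if ((handle & tag_mask) == 0)      -> *handle                       // local
//   else if (handle & weak_global tag bit)  -> *(handle - weak_global tag)   // jweak
//   else                                    -> *(handle - global tag)        // global
// The tag is subtracted before the load, and weak handles use a phantom-ref
// decorated load so the GC can treat them specially.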
void MacroAssembler::resolve_global_jobject(Register value,
                                            Register thread,
                                            Register tmp) {
  assert_different_registers(value, thread, tmp);
  Label done;

  testptr(value, value);
  jcc(Assembler::zero, done);           // Use null as-is.

#ifdef ASSERT
  {
    Label valid_global_tag;
    testptr(value, JNIHandles::TypeTag::global); // Test for global tag.
    jcc(Assembler::notZero, valid_global_tag);
    stop("non global jobject using resolve_global_jobject");
    bind(valid_global_tag);
  }
#endif

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread);
  verify_oop(value);

  bind(done);
}

void MacroAssembler::subptr(Register dst, int32_t imm32) {
  LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
  LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
}

void MacroAssembler::subptr(Register dst, Register src) {
  LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
}

// C++ bool manipulation
void MacroAssembler::testbool(Register dst) {
  if (sizeof(bool) == 1) {
    testb(dst, 0xff);
  } else if (sizeof(bool) == 2) {
    // testw implementation needed for two byte bools
    ShouldNotReachHere();
  } else if (sizeof(bool) == 4) {
    testl(dst, dst);
  } else {
    // unsupported
    ShouldNotReachHere();
  }
}

void MacroAssembler::testptr(Register dst, Register src) {
  LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register thread, Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
}

RegSet MacroAssembler::call_clobbered_gp_registers() {
  RegSet regs;
#ifdef _LP64
  regs += RegSet::of(rax, rcx, rdx);
#ifndef _WINDOWS
  regs += RegSet::of(rsi, rdi);
#endif
  regs += RegSet::range(r8, r11);
#else
  regs += RegSet::of(rax, rcx, rdx);
#endif
  return regs;
}

XMMRegSet MacroAssembler::call_clobbered_xmm_registers() {
  int num_xmm_registers = XMMRegister::available_xmm_registers();
#if defined(_WINDOWS) && defined(_LP64)
  XMMRegSet result = XMMRegSet::range(xmm0, xmm5);
  if (num_xmm_registers > 16) {
    result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1));
  }
  return result;
#else
  return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1));
#endif
}

static int FPUSaveAreaSize = align_up(108, StackAlignmentInBytes); // 108 bytes needed for FPU state by fsave/frstor

#ifndef _LP64
static bool use_x87_registers() { return UseSSE < 2; }
#endif
static bool use_xmm_registers() { return UseSSE >= 1; }

// C1 only ever uses the first double/float of the XMM register.
static int xmm_save_size() { return UseSSE >= 2 ? sizeof(double) : sizeof(float); }
static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
  if (UseSSE == 1) {
    masm->movflt(Address(rsp, offset), reg);
  } else {
    masm->movdbl(Address(rsp, offset), reg);
  }
}

static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
  if (UseSSE == 1) {
    masm->movflt(reg, Address(rsp, offset));
  } else {
    masm->movdbl(reg, Address(rsp, offset));
  }
}

static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers,
                                  bool save_fpu, int& gp_area_size,
                                  int& fp_area_size, int& xmm_area_size) {

  gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
                          StackAlignmentInBytes);
#ifdef _LP64
  fp_area_size = 0;
#else
  fp_area_size = (save_fpu && use_x87_registers()) ? FPUSaveAreaSize : 0;
#endif
  xmm_area_size = (save_fpu && use_xmm_registers()) ? xmm_registers.size() * xmm_save_size() : 0;

  return gp_area_size + fp_area_size + xmm_area_size;
}

void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) {
  block_comment("push_call_clobbered_registers start");
  // Regular registers
  RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude;

  int gp_area_size;
  int fp_area_size;
  int xmm_area_size;
  int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu,
                                               gp_area_size, fp_area_size, xmm_area_size);
  subptr(rsp, total_save_size);

  push_set(gp_registers_to_push, 0);

#ifndef _LP64
  if (save_fpu && use_x87_registers()) {
    fnsave(Address(rsp, gp_area_size));
    fwait();
  }
#endif
  if (save_fpu && use_xmm_registers()) {
    push_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size);
  }

  block_comment("push_call_clobbered_registers end");
}

void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) {
  block_comment("pop_call_clobbered_registers start");

  RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude;

  int gp_area_size;
  int fp_area_size;
  int xmm_area_size;
  int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu,
                                               gp_area_size, fp_area_size, xmm_area_size);

  if (restore_fpu && use_xmm_registers()) {
    pop_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size);
  }
#ifndef _LP64
  if (restore_fpu && use_x87_registers()) {
    frstor(Address(rsp, gp_area_size));
  }
#endif

  pop_set(gp_registers_to_pop, 0);

  addptr(rsp, total_save_size);

  vzeroupper();

  block_comment("pop_call_clobbered_registers end");
}
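// Illustrative layout (not VM code) of the save area built above, growing up
// from rsp:
//   [rsp + 0]                             GP registers, one slot each
//   [rsp + gp_area_size]                  x87 state (32-bit with UseSSE < 2 only)
//   [rsp + gp_area_size + fp_area_size]   one float/double per XMM register
// register_section_sizes() computes the three sections and the aligned total,
// so a single subptr/addptr of total_save_size frames the whole area.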
void MacroAssembler::push_set(XMMRegSet set, int offset) {
  assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be");
  int spill_offset = offset;

  for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) {
    save_xmm_register(this, spill_offset, *it);
    spill_offset += xmm_save_size();
  }
}

void MacroAssembler::pop_set(XMMRegSet set, int offset) {
  int restore_size = set.size() * xmm_save_size();
  assert(is_aligned(restore_size, StackAlignmentInBytes), "must be");

  int restore_offset = offset + restore_size - xmm_save_size();

  for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) {
    restore_xmm_register(this, restore_offset, *it);
    restore_offset -= xmm_save_size();
  }
}

void MacroAssembler::push_set(RegSet set, int offset) {
  int spill_offset;
  if (offset == -1) {
    int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
    int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
    subptr(rsp, aligned_size);
    spill_offset = 0;
  } else {
    spill_offset = offset;
  }

  for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) {
    movptr(Address(rsp, spill_offset), *it);
    spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size;
  }
}

void MacroAssembler::pop_set(RegSet set, int offset) {

  int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size;
  int restore_size = set.size() * gp_reg_size;
  int aligned_size = align_up(restore_size, StackAlignmentInBytes);

  int restore_offset;
  if (offset == -1) {
    restore_offset = restore_size - gp_reg_size;
  } else {
    restore_offset = offset + restore_size - gp_reg_size;
  }
  for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) {
    movptr(*it, Address(rsp, restore_offset));
    restore_offset -= gp_reg_size;
  }

  if (offset == -1) {
    addptr(rsp, aligned_size);
  }
}

// Preserves the contents of address, destroys the contents of length_in_bytes and temp.
void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
  assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
  assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
  Label done;

  testptr(length_in_bytes, length_in_bytes);
  jcc(Assembler::zero, done);

  // initialize topmost word, divide index by 2, check if odd and test if zero
  // note: for the remaining code to work, index must be a multiple of BytesPerWord
#ifdef ASSERT
  {
    Label L;
    testptr(length_in_bytes, BytesPerWord - 1);
    jcc(Assembler::zero, L);
    stop("length must be a multiple of BytesPerWord");
    bind(L);
  }
#endif
  Register index = length_in_bytes;
  xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
  if (UseIncDec) {
    shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
  } else {
    shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
    shrptr(index, 1);
  }
#ifndef _LP64
  // index could have not been a multiple of 8 (i.e., bit 2 was set)
  {
    Label even;
    // note: if index was a multiple of 8, then it cannot
    //       be 0 now otherwise it must have been 0 before
    //       => if it is even, we don't need to check for 0 again
    jcc(Assembler::carryClear, even);
    // clear topmost word (no jump would be needed if conditional assignment worked here)
    movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
    // index could be 0 now, must check again
    jcc(Assembler::zero, done);
    bind(even);
  }
#endif // !_LP64
  // initialize remaining object fields: index is a multiple of 2 now
  {
    Label loop;
    bind(loop);
    movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
    NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
    decrement(index);
    jcc(Assembler::notZero, loop);
  }

  bind(done);
}
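// Illustrative C sketch (not VM code) of the loop above in its 64-bit shape,
// assuming length_in_bytes is a multiple of BytesPerWord:
//   intptr_t index = length_in_bytes >> 3;   // number of 8-byte words
//   while (index != 0) {
//     p[index - 1] = 0;                      // clear from the top down
//     index--;
//   }
// On 32-bit the same shift yields word *pairs* and two stores per iteration;
// the carry from the shift flags an odd trailing word, which is cleared
// separately before the loop. Counting down lets one decrement drive both the
// index and the termination test.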
// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");

  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  Address::ScaleFactor times_vte_scale = Address::times_ptr;
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  for (int peel = 1; peel >= 0; peel--) {
    movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
    cmpptr(intf_klass, method_result);

    if (peel) {
      jccb(Assembler::equal, found_method);
    } else {
      jccb(Assembler::notEqual, search);
      // (invert the test to fall through to found_method...)
    }

    if (!peel) break;

    bind(search);

    // Check that the previous entry is non-null. A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    testptr(method_result, method_result);
    jcc(Assembler::zero, L_no_such_interface);
    addptr(scan_temp, scan_step);
  }

  bind(found_method);

  if (return_method) {
    // Got a hit.
    movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
  }
}
// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register scan_temp,
                                                  Register temp_reg2,
                                                  Register receiver,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver);
  Register temp_itbl_klass = method_result;
  Register temp_reg = (temp_reg2 == noreg ? recv_klass : temp_reg2); // reuse recv_klass register on 32-bit x86 impl

  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());
  Address::ScaleFactor times_vte_scale = Address::times_ptr;
  assert(vte_size == wordSize, "adjust times_vte_scale");

  Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found;

  // temp_itbl_klass = recv_klass.itable[0]
  // scan_temp = &recv_klass.itable[0] + step
  movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset));
  lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step));
  xorptr(temp_reg, temp_reg);

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == 0), no such interface
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  cmpptr(holder_klass, resolved_klass);
  jccb(Assembler::notEqual, L_loop_scan_resolved_entry);
  testptr(temp_itbl_klass, temp_itbl_klass);
  jccb(Assembler::zero, L_no_such_interface);
  cmpptr(holder_klass, temp_itbl_klass);
  jccb(Assembler::equal, L_holder_found);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     tmp = itable[index];
  //     index += step;
  //     if (tmp == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (tmp != 0);
  //   goto L_no_such_interface // Not found.
  Label L_scan_holder;
  bind(L_scan_holder);
  movptr(temp_itbl_klass, Address(scan_temp, 0));
  addptr(scan_temp, scan_step);
  cmpptr(holder_klass, temp_itbl_klass);
  jccb(Assembler::equal, L_holder_found);
  testptr(temp_itbl_klass, temp_itbl_klass);
  jccb(Assembler::notZero, L_scan_holder);

  jmpb(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   do {
  //     tmp = itable[index];
  //     index += step;
  //     if (tmp == holder_klass) {
  //       // Also check if we have met a holder klass
  //       holder_tmp = itable[index-step-ioffset];
  //     }
  //     if (tmp == resolved_klass) {
  //       goto L_resolved_found; // Found!
  //     }
  //   } while (tmp != 0);
  //   goto L_no_such_interface // Not found.
  //
  Label L_loop_scan_resolved;
  bind(L_loop_scan_resolved);
  movptr(temp_itbl_klass, Address(scan_temp, 0));
  addptr(scan_temp, scan_step);
  bind(L_loop_scan_resolved_entry);
  cmpptr(holder_klass, temp_itbl_klass);
  cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
  cmpptr(resolved_klass, temp_itbl_klass);
  jccb(Assembler::equal, L_resolved_found);
  testptr(temp_itbl_klass, temp_itbl_klass);
  jccb(Assembler::notZero, L_loop_scan_resolved);

  jmpb(L_no_such_interface);

  Label L_ready;

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  testptr(temp_reg, temp_reg);
  jccb(Assembler::zero, L_scan_holder);
  jmpb(L_ready);

  bind(L_holder_found);
  movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));

  // Finally, temp_reg contains holder_klass vtable offset
  bind(L_ready);
  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  if (temp_reg2 == noreg) { // recv_klass register is clobbered for 32-bit x86 impl
    load_klass(scan_temp, receiver, noreg);
    movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
  } else {
    movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
  }
}


// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  const ByteSize base = Klass::vtable_start_offset();
  assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
  Address vtable_entry_addr(recv_klass,
                            vtable_index, Address::times_ptr,
                            base + vtableEntry::method_offset());
  movptr(method_result, vtable_entry_addr);
}


void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jcc, which "knows" that L_fallthrough, at least, is in
  // range of a jccb. If this routine grows larger, reconsider at
  // least some of these.
#define local_jcc(assembler_cond, label)                        \
  if (&(label) == &L_fallthrough)  jccb(assembler_cond, label); \
  else                             jcc( assembler_cond, label) /*omit semi*/

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                  \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }      \
  else                            jmp(label) /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmpptr(sub_klass, super_klass);
  local_jcc(Assembler::equal, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    // Positive movl does right thing on LP64.
    movl(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
  cmpptr(super_klass, super_check_addr); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    local_jcc(Assembler::equal, *L_success);
    cmpl(super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_slow_path);
    } else {
      local_jcc(Assembler::notEqual, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_success);
    } else {
      local_jcc(Assembler::notEqual, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_success);
    } else {
      local_jcc(Assembler::notEqual, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef local_jcc
#undef final_jmp
}
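// Illustrative C sketch (not VM code) of the two-phase subtype check the fast
// and slow paths implement together:
//   bool is_subtype(Klass* sub, Klass* super) {
//     if (sub == super) return true;                              // self check
//     juint off = super->super_check_offset();
//     if (*(Klass**)((address)sub + off) == super) return true;   // display or
//                                                                 // cache hit
//     if (off != secondary_super_cache_offset) return false;      // decisive miss
//     for (Klass* k : sub->secondary_supers()) {                  // slow path
//       if (k == super) { /* cache super; */ return true; }
//     }
//     return false;
//   }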
void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
  assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)

  // Get super_klass value into rax (even if it was in rdi or rcx).
  bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
  if (super_klass != rax) {
    if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
    mov(rax, super_klass);
  }
  if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
  if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }

#ifndef PRODUCT
  uint* pst_counter = &SharedRuntime::_partial_subtype_ctr;
  ExternalAddress pst_counter_addr((address) pst_counter);
  NOT_LP64(  incrementl(pst_counter_addr) );
  LP64_ONLY( lea(rcx, pst_counter_addr) );
  LP64_ONLY( incrementl(Address(rcx, 0)) );
#endif //PRODUCT

  // We will consult the secondary-super array.
  movptr(rdi, secondary_supers_addr);
  // Load the array length. (Positive movl does right thing on LP64.)
  movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
  // Skip to start of data.
  addptr(rdi, Array<Klass*>::base_offset_in_bytes());

  // Scan RCX words at [RDI] for an occurrence of RAX.
  // Set NZ/Z based on last compare.
  // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does
  // not change flags (only scas instruction which is repeated sets flags).
  // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.

  testptr(rax, rax); // Set Z = 0
  repne_scan();

  // Unspill the temp registers:
  if (pushed_rdi)  pop(rdi);
  if (pushed_rcx)  pop(rcx);
  if (pushed_rax)  pop(rax);

  if (set_cond_codes) {
    // Special hack for the AD files: rdi is guaranteed non-zero.
    assert(!pushed_rdi, "rdi must be left non-null");
    // Also, the condition codes are properly set Z/NZ on succeed/failure.
  }

  if (L_failure == &L_fallthrough)
        jccb(Assembler::notEqual, *L_failure);
  else  jcc(Assembler::notEqual, *L_failure);

  // Success. Cache the super we found and proceed in triumph.
  movptr(super_cache_addr, super_klass);

  if (L_success != &L_fallthrough) {
    jmp(*L_success);
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}

#ifdef _LP64

// population_count variant for running without the POPCNT
// instruction, which was introduced with SSE4.2 in 2008.
void MacroAssembler::population_count(Register dst, Register src,
                                      Register scratch1, Register scratch2) {
  assert_different_registers(src, scratch1, scratch2);
  if (UsePopCountInstruction) {
    Assembler::popcntq(dst, src);
  } else {
    assert_different_registers(src, scratch1, scratch2);
    assert_different_registers(dst, scratch1, scratch2);
    Label loop, done;

    mov(scratch1, src);
    // dst = 0;
    // while(scratch1 != 0) {
    //   dst++;
    //   scratch1 &= (scratch1 - 1);
    // }
    xorl(dst, dst);
    testq(scratch1, scratch1);
    jccb(Assembler::equal, done);
    {
      bind(loop);
      incq(dst);
      movq(scratch2, scratch1);
      decq(scratch2);
      andq(scratch1, scratch2);
      jccb(Assembler::notEqual, loop);
    }
    bind(done);
  }
}
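// Illustrative C sketch (not VM code): the non-POPCNT fallback above is
// Kernighan's loop, which clears the lowest set bit until none remain:
//   int popcount(uint64_t x) {
//     int n = 0;
//     while (x != 0) { n++; x &= x - 1; }
//     return n;
//   }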
// Ensure that the inline code and the stub are using the same registers.
#define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS                      \
do {                                                                 \
  assert(r_super_klass  == rax, "mismatch");                         \
  assert(r_array_base   == rbx, "mismatch");                         \
  assert(r_array_length == rcx, "mismatch");                         \
  assert(r_array_index  == rdx, "mismatch");                         \
  assert(r_sub_klass    == rsi || r_sub_klass == noreg, "mismatch"); \
  assert(r_bitmap       == r11 || r_bitmap    == noreg, "mismatch"); \
  assert(result         == rdi || result      == noreg, "mismatch"); \
} while(0)

void MacroAssembler::lookup_secondary_supers_table(Register r_sub_klass,
                                                   Register r_super_klass,
                                                   Register temp1,
                                                   Register temp2,
                                                   Register temp3,
                                                   Register temp4,
                                                   Register result,
                                                   u1 super_klass_slot) {
  assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);

  Label L_fallthrough, L_success, L_failure;

  BLOCK_COMMENT("lookup_secondary_supers_table {");

  const Register
    r_array_index  = temp1,
    r_array_length = temp2,
    r_array_base   = temp3,
    r_bitmap       = temp4;

  LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;

  xorq(result, result); // = 0

  movq(r_bitmap, Address(r_sub_klass, Klass::bitmap_offset()));
  movq(r_array_index, r_bitmap);

  // First check the bitmap to see if super_klass might be present. If
  // the bit is zero, we are certain that super_klass is not one of
  // the secondary supers.
  u1 bit = super_klass_slot;
  {
    // NB: If the count in a x86 shift instruction is 0, the flags are
    // not affected, so we do a testq instead.
    int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit;
    if (shift_count != 0) {
      salq(r_array_index, shift_count);
    } else {
      testq(r_array_index, r_array_index);
    }
  }
  // We test the MSB of r_array_index, i.e. its sign bit
  jcc(Assembler::positive, L_failure);

  // Get the first array index that can contain super_klass into r_array_index.
  if (bit != 0) {
    population_count(r_array_index, r_array_index, temp2, temp3);
  } else {
    movl(r_array_index, 1);
  }
  // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.

  // We will consult the secondary-super array.
  movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));

  // We're asserting that the first word in an Array<Klass*> is the
  // length, and the second word is the first word of the data. If
  // that ever changes, r_array_base will have to be adjusted here.
  assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
  assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");

  cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
  jccb(Assembler::equal, L_success);

  // Is there another entry to check? Consult the bitmap.
  btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK);
  jccb(Assembler::carryClear, L_failure);

  // Linear probe. Rotate the bitmap so that the next bit to test is
  // in Bit 1.
  if (bit != 0) {
    rorq(r_bitmap, bit);
  }

  // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
  // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
  // Kills: r_array_length.
  // Returns: result.
  call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()));
  // Result (0/1) is in rdi
  jmpb(L_fallthrough);

  bind(L_failure);
  incq(result); // 0 => 1

  bind(L_success);
  // result = 0;

  bind(L_fallthrough);
  BLOCK_COMMENT("} lookup_secondary_supers_table");

  if (VerifySecondarySupers) {
    verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
                                  temp1, temp2, temp3);
  }
}

void MacroAssembler::repne_scanq(Register addr, Register value, Register count, Register limit,
                                 Label* L_success, Label* L_failure) {
  Label L_loop, L_fallthrough;
  {
    int label_nulls = 0;
    if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
    if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
    assert(label_nulls <= 1, "at most one null in the batch");
  }
  bind(L_loop);
  cmpq(value, Address(addr, count, Address::times_8));
  jcc(Assembler::equal, *L_success);
  addl(count, 1);
  cmpl(count, limit);
  jcc(Assembler::less, L_loop);

  if (&L_fallthrough != L_failure) {
    jmp(*L_failure);
  }
  bind(L_fallthrough);
}
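// Illustrative C sketch (not VM code) of the hashed scheme implemented by
// lookup_secondary_supers_table and its slow path, assuming a 64-slot table
// with one occupancy bit per slot (SECONDARY_SUPERS_TABLE_MASK == 63):
//   bool lookup(Klass* sub, Klass* super, int slot) {
//     uint64_t bitmap = sub->bitmap();
//     if (((bitmap >> slot) & 1) == 0) return false;   // definitely absent
//     // Count occupied slots at positions <= slot; the table is stored
//     // compressed, so this popcount is the (1-based) array index.
//     int index = popcount(bitmap << (63 - slot));
//     if (secondary_supers[index - 1] == super) return true;
//     // Otherwise fall back to bitmap-steered linear probing (the slow-path
//     // stub), or to a plain scan when all 64 bits are set (the "huge" case).
//     ...
//   }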
// Called by code generated by check_klass_subtype_slow_path
// above. This is called when there is a collision in the hashed
// lookup in the secondary supers array.
void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
                                                             Register r_array_base,
                                                             Register r_array_index,
                                                             Register r_bitmap,
                                                             Register temp1,
                                                             Register temp2,
                                                             Label* L_success,
                                                             Label* L_failure) {
  assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2);

  const Register
    r_array_length = temp1,
    r_sub_klass    = noreg,
    result         = noreg;

  LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  // Load the array length.
  movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
  // And adjust the array base to point to the data.
  // NB! Effectively increments current slot index by 1.
  assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
  addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());

  // Linear probe
  Label L_huge;

  // The bitmap is full to bursting.
  // Implicit invariant: BITMAP_FULL implies (length > 0)
  assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), "");
  cmpq(r_bitmap, (int32_t)-1); // sign-extends immediate to 64-bit value
  jcc(Assembler::equal, L_huge);

  // NB! Our caller has checked bits 0 and 1 in the bitmap. The
  // current slot (at secondary_supers[r_array_index]) has not yet
  // been inspected, and r_array_index may be out of bounds if we
  // wrapped around the end of the array.

  { // This is conventional linear probing, but instead of terminating
    // when a null entry is found in the table, we maintain a bitmap
    // in which a 0 indicates missing entries.
    // The check above guarantees there are 0s in the bitmap, so the loop
    // eventually terminates.

    xorl(temp2, temp2);  // = 0;

    Label L_again;
    bind(L_again);

    // Check for array wraparound.
    cmpl(r_array_index, r_array_length);
    cmovl(Assembler::greaterEqual, r_array_index, temp2);

    cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
    jcc(Assembler::equal, *L_success);

    // If the next bit in bitmap is zero, we're done.
    btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now
    jcc(Assembler::carryClear, *L_failure);

    rorq(r_bitmap, 1); // Bits 1/2 => 0/1
    addl(r_array_index, 1);

    jmp(L_again);
  }

  { // Degenerate case: more than 64 secondary supers.
    // FIXME: We could do something smarter here, maybe a vectorized
    // comparison or a binary search, but is that worth any added
    // complexity?
    bind(L_huge);
    xorl(r_array_index, r_array_index); // = 0
    repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length,
                L_success,
                (&L_fallthrough != L_failure ? L_failure : nullptr));
  { // Degenerate case: more than 64 secondary supers.
    // FIXME: We could do something smarter here, maybe a vectorized
    // comparison or a binary search, but is that worth any added
    // complexity?
    bind(L_huge);
    xorl(r_array_index, r_array_index); // = 0
    repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length,
                L_success,
                (&L_fallthrough != L_failure ? L_failure : nullptr));

    bind(L_fallthrough);
  }
}

struct VerifyHelperArguments {
  Klass* _super;
  Klass* _sub;
  intptr_t _linear_result;
  intptr_t _table_result;
};

static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) {
  Klass::on_secondary_supers_verification_failure(args->_super,
                                                  args->_sub,
                                                  args->_linear_result,
                                                  args->_table_result,
                                                  msg);
}

// Make sure that the hashed lookup and a linear scan agree.
void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
                                                   Register r_super_klass,
                                                   Register result,
                                                   Register temp1,
                                                   Register temp2,
                                                   Register temp3) {
  const Register
    r_array_index  = temp1,
    r_array_length = temp2,
    r_array_base   = temp3,
    r_bitmap       = noreg;

  LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;

  BLOCK_COMMENT("verify_secondary_supers_table {");

  Label L_success, L_failure, L_check, L_done;

  movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
  movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
  // And adjust the array base to point to the data.
  addptr(r_array_base, Array<Klass*>::base_offset_in_bytes());

  testl(r_array_length, r_array_length); // array_length == 0?
  jcc(Assembler::zero, L_failure);

  movl(r_array_index, 0);
  repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success);
  // fall through to L_failure

  const Register linear_result = r_array_index; // reuse temp1

  bind(L_failure); // not present
  movl(linear_result, 1);
  jmp(L_check);

  bind(L_success); // present
  movl(linear_result, 0);

  bind(L_check);
  cmpl(linear_result, result);
  jcc(Assembler::equal, L_done);

  { // To avoid calling convention issues, build a record on the stack
    // and pass the pointer to that instead.
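    // The pushes below lay out a VerifyHelperArguments record in stack
    // memory: the value pushed last (r_super_klass) ends up at the lowest
    // address, so rsp lines up with _super, followed by _sub,
    // _linear_result and _table_result.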
    push(result);
    push(linear_result);
    push(r_sub_klass);
    push(r_super_klass);
    movptr(c_rarg1, rsp);
    movptr(c_rarg0, (uintptr_t) "mismatch");
    call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper)));
    should_not_reach_here();
  }
  bind(L_done);

  BLOCK_COMMENT("} verify_secondary_supers_table");
}

#undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS

#endif // LP64

void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
  assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");

  Label L_fallthrough;
  if (L_fast_path == nullptr) {
    L_fast_path = &L_fallthrough;
  } else if (L_slow_path == nullptr) {
    L_slow_path = &L_fallthrough;
  }

  // Fast path check: class is fully initialized
  cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
  jcc(Assembler::equal, *L_fast_path);

  // Fast path check: current thread is initializer thread
  cmpptr(thread, Address(klass, InstanceKlass::init_thread_offset()));
  if (L_slow_path == &L_fallthrough) {
    jcc(Assembler::equal, *L_fast_path);
    bind(*L_slow_path);
  } else if (L_fast_path == &L_fallthrough) {
    jcc(Assembler::notEqual, *L_slow_path);
    bind(*L_fast_path);
  } else {
    Unimplemented();
  }
}

void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
  if (VM_Version::supports_cmov()) {
    cmovl(cc, dst, src);
  } else {
    Label L;
    jccb(negate_condition(cc), L);
    movl(dst, src);
    bind(L);
  }
}

void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
  if (VM_Version::supports_cmov()) {
    cmovl(cc, dst, src);
  } else {
    Label L;
    jccb(negate_condition(cc), L);
    movl(dst, src);
    bind(L);
  }
}

void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
  if (!VerifyOops) return;

  BLOCK_COMMENT("verify_oop {");
#ifdef _LP64
  push(rscratch1);
#endif
  push(rax); // save rax
  push(reg); // pass register argument

  // Pass register number to verify_oop_subroutine
  const char* b = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
    b = code_string(ss.as_string());
  }
  ExternalAddress buffer((address) b);
  pushptr(buffer.addr(), rscratch1);

  // call indirectly to solve generation ordering problem
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
  // Caller pops the arguments (oop, message) and restores rax, r10
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::vallones(XMMRegister dst, int vector_len) {
  if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) {
    // Only pcmpeq has dependency breaking treatment (i.e the execution can begin without
    // waiting for the previous result on dst), not vpcmpeqd, so just use vpternlog
    vpternlogd(dst, 0xFF, dst, dst, vector_len);
  } else if (VM_Version::supports_avx()) {
    vpcmpeqd(dst, dst, dst, vector_len);
  } else {
    assert(VM_Version::supports_sse2(), "");
    pcmpeqd(dst, dst);
  }
}
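// A note on the vpternlogd immediate above: 0xFF is a 3-input truth table
// that evaluates to 1 for every input combination, so the destination
// becomes all-ones regardless of the source values.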
Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
  int stackElementSize = Interpreter::stackElementSize;
  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
#ifdef ASSERT
  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  Register             scale_reg    = noreg;
  Address::ScaleFactor scale_factor = Address::no_scale;
  if (arg_slot.is_constant()) {
    offset += arg_slot.as_constant() * stackElementSize;
  } else {
    scale_reg    = arg_slot.as_register();
    scale_factor = Address::times(stackElementSize);
  }
  offset += wordSize; // return PC is on stack
  return Address(rsp, scale_reg, scale_factor, offset);
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
  if (!VerifyOops) return;

#ifdef _LP64
  push(rscratch1);
#endif
  push(rax); // save rax,
  // addr may contain rsp so we will have to adjust it based on the push
  // we just did (and on 64 bit we do two pushes)
  // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
  // stores rax into addr which is backwards of what was intended.
  if (addr.uses(rsp)) {
    lea(rax, addr);
    pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
  } else {
    pushptr(addr);
  }

  // Pass register number to verify_oop_subroutine
  const char* b = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
    b = code_string(ss.as_string());
  }
  ExternalAddress buffer((address) b);
  pushptr(buffer.addr(), rscratch1);

  // call indirectly to solve generation ordering problem
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
  // Caller pops the arguments (addr, message) and restores rax, r10.
}
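// Note on the displacement above: by the time the lea executes, rsp has
// moved down by two words on 64-bit (pushes of rscratch1 and rax) or one
// word on 32-bit (push of rax), so an rsp-relative 'addr' must be read
// LP64_ONLY(2 *) BytesPerWord above the freshly computed address.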
void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, ok;
    Register t1 = rsi;
    Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);

    push(t1);
    NOT_LP64(push(thread_reg));
    NOT_LP64(get_thread(thread_reg));

    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
    jcc(Assembler::aboveEqual, next);
    STOP("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
    jcc(Assembler::aboveEqual, ok);
    STOP("assert(top <= end)");
    should_not_reach_here();

    bind(ok);
    NOT_LP64(pop(thread_reg));
    pop(t1);
  }
#endif
}

class ControlWord {
 public:
  int32_t _value;

  int  rounding_control() const { return (_value >> 10) & 3; }
  int  precision_control() const { return (_value >>  8) & 3; }
  bool precision() const { return ((_value >>  5) & 1) != 0; }
  bool underflow() const { return ((_value >>  4) & 1) != 0; }
  bool overflow() const { return ((_value >>  3) & 1) != 0; }
  bool zero_divide() const { return ((_value >>  2) & 1) != 0; }
  bool denormalized() const { return ((_value >>  1) & 1) != 0; }
  bool invalid() const { return ((_value >>  0) & 1) != 0; }

  void print() const {
    // rounding control
    const char* rc;
    switch (rounding_control()) {
      case 0: rc = "round near"; break;
      case 1: rc = "round down"; break;
      case 2: rc = "round up  "; break;
      case 3: rc = "chop      "; break;
      default:
        rc = nullptr; // silence compiler warnings
        fatal("Unknown rounding control: %d", rounding_control());
    };
    // precision control
    const char* pc;
    switch (precision_control()) {
      case 0: pc = "24 bits "; break;
      case 1: pc = "reserved"; break;
      case 2: pc = "53 bits "; break;
      case 3: pc = "64 bits "; break;
      default:
        pc = nullptr; // silence compiler warnings
        fatal("Unknown precision control: %d", precision_control());
    };
    // flags
    char f[9];
    f[0] = ' ';
    f[1] = ' ';
    f[2] = (precision   ()) ? 'P' : 'p';
    f[3] = (underflow   ()) ? 'U' : 'u';
    f[4] = (overflow    ()) ? 'O' : 'o';
    f[5] = (zero_divide ()) ? 'Z' : 'z';
    f[6] = (denormalized()) ? 'D' : 'd';
    f[7] = (invalid     ()) ? 'I' : 'i';
    f[8] = '\x0';
    // output
    printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
  }

};
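// In the masks string above, an uppercase letter means the corresponding
// x87 exception is masked (its mask bit in the control word is set);
// lowercase means the exception is unmasked.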
class StatusWord {
 public:
  int32_t _value;

  bool busy() const { return ((_value >> 15) & 1) != 0; }
  bool C3() const { return ((_value >> 14) & 1) != 0; }
  bool C2() const { return ((_value >> 10) & 1) != 0; }
  bool C1() const { return ((_value >>  9) & 1) != 0; }
  bool C0() const { return ((_value >>  8) & 1) != 0; }
  int  top() const { return (_value >> 11) & 7; }
  bool error_status() const { return ((_value >>  7) & 1) != 0; }
  bool stack_fault() const { return ((_value >>  6) & 1) != 0; }
  bool precision() const { return ((_value >>  5) & 1) != 0; }
  bool underflow() const { return ((_value >>  4) & 1) != 0; }
  bool overflow() const { return ((_value >>  3) & 1) != 0; }
  bool zero_divide() const { return ((_value >>  2) & 1) != 0; }
  bool denormalized() const { return ((_value >>  1) & 1) != 0; }
  bool invalid() const { return ((_value >>  0) & 1) != 0; }

  void print() const {
    // condition codes
    char c[5];
    c[0] = (C3()) ? '3' : '-';
    c[1] = (C2()) ? '2' : '-';
    c[2] = (C1()) ? '1' : '-';
    c[3] = (C0()) ? '0' : '-';
    c[4] = '\x0';
    // flags
    char f[9];
    f[0] = (error_status()) ? 'E' : '-';
    f[1] = (stack_fault ()) ? 'S' : '-';
    f[2] = (precision   ()) ? 'P' : '-';
    f[3] = (underflow   ()) ? 'U' : '-';
    f[4] = (overflow    ()) ? 'O' : '-';
    f[5] = (zero_divide ()) ? 'Z' : '-';
    f[6] = (denormalized()) ? 'D' : '-';
    f[7] = (invalid     ()) ? 'I' : '-';
    f[8] = '\x0';
    // output
    printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
  }

};

class TagWord {
 public:
  int32_t _value;

  int tag_at(int i) const { return (_value >> (i*2)) & 3; }

  void print() const {
    printf("%04x", _value & 0xFFFF);
  }

};

class FPU_Register {
 public:
  int32_t _m0;
  int32_t _m1;
  int16_t _ex;

  bool is_indefinite() const {
    return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
  }

  void print() const {
    char sign = (_ex < 0) ? '-' : '+';
    const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : "   ";
    printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
  };

};

class FPU_State {
 public:
  enum {
    register_size       = 10,
    number_of_registers =  8,
    register_mask       =  7
  };

  ControlWord _control_word;
  StatusWord  _status_word;
  TagWord     _tag_word;
  int32_t     _error_offset;
  int32_t     _error_selector;
  int32_t     _data_offset;
  int32_t     _data_selector;
  int8_t      _register[register_size * number_of_registers];

  int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
  FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }

  const char* tag_as_string(int tag) const {
    switch (tag) {
      case 0: return "valid";
      case 1: return "zero";
      case 2: return "special";
      case 3: return "empty";
    }
    ShouldNotReachHere();
    return nullptr;
  }

  void print() const {
    // print computation registers
    { int t = _status_word.top();
      for (int i = 0; i < number_of_registers; i++) {
        int j = (i - t) & register_mask;
        printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
        st(j)->print();
        printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
      }
    }
    printf("\n");
    // print control registers
    printf("ctrl = "); _control_word.print(); printf("\n");
    printf("stat = "); _status_word .print(); printf("\n");
    printf("tags = "); _tag_word    .print(); printf("\n");
  }

};
class Flag_Register {
 public:
  int32_t _value;

  bool overflow() const { return ((_value >> 11) & 1) != 0; }
  bool direction() const { return ((_value >> 10) & 1) != 0; }
  bool sign() const { return ((_value >>  7) & 1) != 0; }
  bool zero() const { return ((_value >>  6) & 1) != 0; }
  bool auxiliary_carry() const { return ((_value >>  4) & 1) != 0; }
  bool parity() const { return ((_value >>  2) & 1) != 0; }
  bool carry() const { return ((_value >>  0) & 1) != 0; }

  void print() const {
    // flags
    char f[8];
    f[0] = (overflow       ()) ? 'O' : '-';
    f[1] = (direction      ()) ? 'D' : '-';
    f[2] = (sign           ()) ? 'S' : '-';
    f[3] = (zero           ()) ? 'Z' : '-';
    f[4] = (auxiliary_carry()) ? 'A' : '-';
    f[5] = (parity         ()) ? 'P' : '-';
    f[6] = (carry          ()) ? 'C' : '-';
    f[7] = '\x0';
    // output
    printf("%08x flags = %s", _value, f);
  }

};

class IU_Register {
 public:
  int32_t _value;

  void print() const {
    printf("%08x %11d", _value, _value);
  }

};

class IU_State {
 public:
  Flag_Register _eflags;
  IU_Register   _rdi;
  IU_Register   _rsi;
  IU_Register   _rbp;
  IU_Register   _rsp;
  IU_Register   _rbx;
  IU_Register   _rdx;
  IU_Register   _rcx;
  IU_Register   _rax;

  void print() const {
    // computation registers
    printf("rax, = "); _rax.print(); printf("\n");
    printf("rbx, = "); _rbx.print(); printf("\n");
    printf("rcx = "); _rcx.print(); printf("\n");
    printf("rdx = "); _rdx.print(); printf("\n");
    printf("rdi = "); _rdi.print(); printf("\n");
    printf("rsi = "); _rsi.print(); printf("\n");
    printf("rbp, = "); _rbp.print(); printf("\n");
    printf("rsp = "); _rsp.print(); printf("\n");
    printf("\n");
    // control registers
    printf("flgs = "); _eflags.print(); printf("\n");
  }
};


class CPU_State {
 public:
  FPU_State _fpu_state;
  IU_State  _iu_state;

  void print() const {
    printf("--------------------------------------------------\n");
    _iu_state .print();
    printf("\n");
    _fpu_state.print();
    printf("--------------------------------------------------\n");
  }

};


static void _print_CPU_state(CPU_State* state) {
  state->print();
};


void MacroAssembler::print_CPU_state() {
  push_CPU_state();
  push(rsp);                // pass CPU state
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
  addptr(rsp, wordSize);    // discard argument
  pop_CPU_state();
}


#ifndef _LP64
static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
  static int counter = 0;
  FPU_State* fs = &state->_fpu_state;
  counter++;
  // For leaf calls, only verify that the top few elements remain empty.
  // We only need 1 empty at the top for C2 code.
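  // Convention for stack_depth (see the checks below): a negative value
  // means "at most -stack_depth elements on the x87 stack"; a non-negative
  // value means "exactly stack_depth elements".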
  if( stack_depth < 0 ) {
    if( fs->tag_for_st(7) != 3 ) {
      printf("FPR7 not empty\n");
      state->print();
      assert(false, "error");
      return false;
    }
    return true;                // All other stack states do not matter
  }

  assert((fs->_control_word._value & 0xffff) == StubRoutines::x86::fpu_cntrl_wrd_std(),
         "bad FPU control word");

  // compute stack depth
  int i = 0;
  while (i < FPU_State::number_of_registers && fs->tag_for_st(i)  < 3) i++;
  int d = i;
  while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
  // verify findings
  if (i != FPU_State::number_of_registers) {
    // stack not contiguous
    printf("%s: stack not contiguous at ST%d\n", s, i);
    state->print();
    assert(false, "error");
    return false;
  }
  // check if computed stack depth corresponds to expected stack depth
  if (stack_depth < 0) {
    // expected stack depth is -stack_depth or less
    if (d > -stack_depth) {
      // too many elements on the stack
      printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
      state->print();
      assert(false, "error");
      return false;
    }
  } else {
    // expected stack depth is stack_depth
    if (d != stack_depth) {
      // wrong stack depth
      printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
      state->print();
      assert(false, "error");
      return false;
    }
  }
  // everything is cool
  return true;
}

void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
  if (!VerifyFPU) return;
  push_CPU_state();
  push(rsp);                // pass CPU state
  ExternalAddress msg((address) s);
  // pass message string s
  pushptr(msg.addr(), noreg);
  push(stack_depth);        // pass stack depth
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
  addptr(rsp, 3 * wordSize);   // discard arguments
  // check for error
  { Label L;
    testl(rax, rax);
    jcc(Assembler::notZero, L);
    int3();                  // break if error condition
    bind(L);
  }
  pop_CPU_state();
}
#endif // _LP64

void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) {
  // Either restore the MXCSR register after returning from the JNI Call
  // or verify that it wasn't changed (with -Xcheck:jni flag).
  if (VM_Version::supports_sse()) {
    if (RestoreMXCSROnJNICalls) {
      ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
    } else if (CheckJNICalls) {
      call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
    }
  }
  // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
  vzeroupper();

#ifndef _LP64
  // Either restore the x87 floating pointer control word after returning
  // from the JNI call or verify that it wasn't changed.
  if (CheckJNICalls) {
    call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
  }
#endif // _LP64
}

// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
  assert_different_registers(result, tmp);

  // Only 64 bit platforms support GCs that require a tmp register
  // Only IN_HEAP loads require a thread_tmp register
  // OopHandle::resolve is an indirection like jobject.
  access_load_at(T_OBJECT, IN_NATIVE,
                 result, Address(result, 0), tmp, /*tmp_thread*/noreg);
}
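// In effect (illustrative): result = *(oop*)result, i.e. a single
// dependent load, performed through the GC barrier set with the
// IN_NATIVE decorator.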
// ((WeakHandle)result).resolve();
void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
  assert_different_registers(rresult, rtmp);
  Label resolved;

  // A null weak handle resolves to null.
  cmpptr(rresult, 0);
  jcc(Assembler::equal, resolved);

  // Only 64 bit platforms support GCs that require a tmp register
  // Only IN_HEAP loads require a thread_tmp register
  // WeakHandle::resolve is an indirection like jweak.
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 rresult, Address(rresult, 0), rtmp, /*tmp_thread*/noreg);
  bind(resolved);
}

void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
  // get mirror
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  load_method_holder(mirror, method);
  movptr(mirror, Address(mirror, mirror_offset));
  resolve_oop_handle(mirror, tmp);
}

void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
  load_method_holder(rresult, rmethod);
  movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
}

void MacroAssembler::load_method_holder(Register holder, Register method) {
  movptr(holder, Address(method, Method::const_offset()));             // ConstMethod*
  movptr(holder, Address(holder, ConstMethod::constants_offset()));    // ConstantPool*
  movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
}

void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
  assert_different_registers(src, tmp);
  assert_different_registers(dst, tmp);
#ifdef _LP64
  if (UseCompressedClassPointers) {
    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_klass_not_null(dst, tmp);
  } else
#endif
    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}

void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
  assert_different_registers(src, tmp);
  assert_different_registers(dst, tmp);
#ifdef _LP64
  if (UseCompressedClassPointers) {
    encode_klass_not_null(src, tmp);
    movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
  } else
#endif
    movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
}

void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                                    Register tmp1, Register thread_tmp) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  decorators = AccessInternal::decorator_fixup(decorators, type);
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
  } else {
    bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
  }
}

void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
                                     Register tmp1, Register tmp2, Register tmp3) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  decorators = AccessInternal::decorator_fixup(decorators, type);
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
  } else {
    bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
  }
}
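// Dispatch note for the two helpers above: AS_RAW pins the call to the
// plain BarrierSetAssembler implementation (a qualified, non-virtual call
// that skips GC-specific barriers), while the normal path dispatches
// virtually on the active barrier set.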
void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
                                   Register thread_tmp, DecoratorSet decorators) {
  access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
}

// Doesn't do verification, generates fixed size code
void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
                                            Register thread_tmp, DecoratorSet decorators) {
  access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
}

void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
                                    Register tmp2, Register tmp3, DecoratorSet decorators) {
  access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
}

// Used for storing nulls.
void MacroAssembler::store_heap_oop_null(Address dst) {
  access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
}

#ifdef _LP64
void MacroAssembler::store_klass_gap(Register dst, Register src) {
  if (UseCompressedClassPointers) {
    // Store to klass gap in destination
    movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
  }
}

#ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) {
  assert (UseCompressedOops, "should be compressed");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  if (CheckCompressedOops) {
    Label ok;
    ExternalAddress src2(CompressedOops::ptrs_base_addr());
    const bool is_src2_reachable = reachable(src2);
    if (!is_src2_reachable) {
      push(rscratch1);  // cmpptr trashes rscratch1
    }
    cmpptr(r12_heapbase, src2, rscratch1);
    jcc(Assembler::equal, ok);
    STOP(msg);
    bind(ok);
    if (!is_src2_reachable) {
      pop(rscratch1);
    }
  }
}
#endif
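// Compressed oops, in effect (illustrative only):
//   encode: narrow = (oop == nullptr) ? 0 : (uint32_t)((oop - base) >> shift)
//   decode: oop    = (narrow == 0) ? nullptr : base + ((uintptr_t)narrow << shift)
// When base == nullptr and/or shift == 0, the emitters below degenerate to
// cheaper forms (plain shift, plain add, or a register move).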
// Algorithm must match oop.inline.hpp encode_heap_oop.
void MacroAssembler::encode_heap_oop(Register r) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
  verify_oop_msg(r, "broken oop in encode_heap_oop");
  if (CompressedOops::base() == nullptr) {
    if (CompressedOops::shift() != 0) {
      assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
      shrq(r, LogMinObjAlignmentInBytes);
    }
    return;
  }
  testq(r, r);
  cmovq(Assembler::equal, r, r12_heapbase);
  subq(r, r12_heapbase);
  shrq(r, LogMinObjAlignmentInBytes);
}

void MacroAssembler::encode_heap_oop_not_null(Register r) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    testq(r, r);
    jcc(Assembler::notEqual, ok);
    STOP("null oop passed to encode_heap_oop_not_null");
    bind(ok);
  }
#endif
  verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
  if (CompressedOops::base() != nullptr) {
    subq(r, r12_heapbase);
  }
  if (CompressedOops::shift() != 0) {
    assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    shrq(r, LogMinObjAlignmentInBytes);
  }
}

void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    testq(src, src);
    jcc(Assembler::notEqual, ok);
    STOP("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
  verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");
  if (dst != src) {
    movq(dst, src);
  }
  if (CompressedOops::base() != nullptr) {
    subq(dst, r12_heapbase);
  }
  if (CompressedOops::shift() != 0) {
    assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    shrq(dst, LogMinObjAlignmentInBytes);
  }
}

void MacroAssembler::decode_heap_oop(Register r) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
#endif
  if (CompressedOops::base() == nullptr) {
    if (CompressedOops::shift() != 0) {
      assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
      shlq(r, LogMinObjAlignmentInBytes);
    }
  } else {
    Label done;
    shlq(r, LogMinObjAlignmentInBytes);
    jccb(Assembler::equal, done);
    addq(r, r12_heapbase);
    bind(done);
  }
  verify_oop_msg(r, "broken oop in decode_heap_oop");
}

void MacroAssembler::decode_heap_oop_not_null(Register r) {
  // Note: it will change flags
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
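  // No explicit null check is needed here: the caller guarantees r != 0,
  // so the heap base can be added unconditionally after the shift.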
  if (CompressedOops::shift() != 0) {
    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    shlq(r, LogMinObjAlignmentInBytes);
    if (CompressedOops::base() != nullptr) {
      addq(r, r12_heapbase);
    }
  } else {
    assert (CompressedOops::base() == nullptr, "sanity");
  }
}

void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
  // Note: it will change flags
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (CompressedOops::shift() != 0) {
    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    if (LogMinObjAlignmentInBytes == Address::times_8) {
      leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
    } else {
      if (dst != src) {
        movq(dst, src);
      }
      shlq(dst, LogMinObjAlignmentInBytes);
      if (CompressedOops::base() != nullptr) {
        addq(dst, r12_heapbase);
      }
    }
  } else {
    assert (CompressedOops::base() == nullptr, "sanity");
    if (dst != src) {
      movq(dst, src);
    }
  }
}

void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
  assert_different_registers(r, tmp);
  if (CompressedKlassPointers::base() != nullptr) {
    mov64(tmp, (int64_t)CompressedKlassPointers::base());
    subq(r, tmp);
  }
  if (CompressedKlassPointers::shift() != 0) {
    assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
    shrq(r, LogKlassAlignmentInBytes);
  }
}

void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
  assert_different_registers(src, dst);
  if (CompressedKlassPointers::base() != nullptr) {
    mov64(dst, -(int64_t)CompressedKlassPointers::base());
    addq(dst, src);
  } else {
    movptr(dst, src);
  }
  if (CompressedKlassPointers::shift() != 0) {
    assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
    shrq(dst, LogKlassAlignmentInBytes);
  }
}

void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
  assert_different_registers(r, tmp);
  // Note: it will change flags
  assert(UseCompressedClassPointers, "should only be used for compressed headers");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (CompressedKlassPointers::shift() != 0) {
    assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
    shlq(r, LogKlassAlignmentInBytes);
  }
  if (CompressedKlassPointers::base() != nullptr) {
    mov64(tmp, (int64_t)CompressedKlassPointers::base());
    addq(r, tmp);
  }
}

void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
  assert_different_registers(src, dst);
  // Note: it will change flags
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.

  if (CompressedKlassPointers::base() == nullptr &&
      CompressedKlassPointers::shift() == 0) {
    // The best case scenario is that there is no base or shift. Then it is already
    // a pointer that needs nothing but a register rename.
    movl(dst, src);
  } else {
    if (CompressedKlassPointers::base() != nullptr) {
      mov64(dst, (int64_t)CompressedKlassPointers::base());
    } else {
      xorq(dst, dst);
    }
    if (CompressedKlassPointers::shift() != 0) {
      assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
      assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
      leaq(dst, Address(dst, src, Address::times_8, 0));
    } else {
      addq(dst, src);
    }
  }
}

void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_narrow_oop(dst, oop_index, rspec);
}

void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_narrow_oop(dst, oop_index, rspec);
}

void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
}

void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
}

void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
"should only be used for compressed headers"); 6010 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6011 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6012 int oop_index = oop_recorder()->find_index(obj); 6013 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6014 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 6015 } 6016 6017 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) { 6018 assert (UseCompressedOops, "should only be used for compressed headers"); 6019 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6020 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6021 int oop_index = oop_recorder()->find_index(obj); 6022 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6023 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 6024 } 6025 6026 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) { 6027 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6028 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6029 int klass_index = oop_recorder()->find_index(k); 6030 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6031 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6032 } 6033 6034 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { 6035 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6036 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6037 int klass_index = oop_recorder()->find_index(k); 6038 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6039 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6040 } 6041 6042 void MacroAssembler::reinit_heapbase() { 6043 if (UseCompressedOops) { 6044 if (Universe::heap() != nullptr) { 6045 if (CompressedOops::base() == nullptr) { 6046 MacroAssembler::xorptr(r12_heapbase, r12_heapbase); 6047 } else { 6048 mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base()); 6049 } 6050 } else { 6051 movptr(r12_heapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 6052 } 6053 } 6054 } 6055 6056 #endif // _LP64 6057 6058 #if COMPILER2_OR_JVMCI 6059 6060 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers 6061 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { 6062 // cnt - number of qwords (8-byte words). 6063 // base - start address, qword aligned. 
  Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
  bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
  if (use64byteVector) {
    vpxor(xtmp, xtmp, xtmp, AVX_512bit);
  } else if (MaxVectorSize >= 32) {
    vpxor(xtmp, xtmp, xtmp, AVX_256bit);
  } else {
    pxor(xtmp, xtmp);
  }
  jmp(L_zero_64_bytes);

  BIND(L_loop);
  if (MaxVectorSize >= 32) {
    fill64(base, 0, xtmp, use64byteVector);
  } else {
    movdqu(Address(base,  0), xtmp);
    movdqu(Address(base, 16), xtmp);
    movdqu(Address(base, 32), xtmp);
    movdqu(Address(base, 48), xtmp);
  }
  addptr(base, 64);

  BIND(L_zero_64_bytes);
  subptr(cnt, 8);
  jccb(Assembler::greaterEqual, L_loop);

  // Copy trailing 64 bytes
  if (use64byteVector) {
    addptr(cnt, 8);
    jccb(Assembler::equal, L_end);
    fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true);
    jmp(L_end);
  } else {
    addptr(cnt, 4);
    jccb(Assembler::less, L_tail);
    if (MaxVectorSize >= 32) {
      vmovdqu(Address(base, 0), xtmp);
    } else {
      movdqu(Address(base,  0), xtmp);
      movdqu(Address(base, 16), xtmp);
    }
  }
  addptr(base, 32);
  subptr(cnt, 4);

  BIND(L_tail);
  addptr(cnt, 4);
  jccb(Assembler::lessEqual, L_end);
  if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
    fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp);
  } else {
    decrement(cnt);

    BIND(L_sloop);
    movq(Address(base, 0), xtmp);
    addptr(base, 8);
    decrement(cnt);
    jccb(Assembler::greaterEqual, L_sloop);
  }
  BIND(L_end);
}

// Clearing constant sized memory using YMM/ZMM registers.
void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
  assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
  bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);

  int vector64_count = (cnt & (~0x7)) >> 3;
  cnt = cnt & 0x7;
  const int fill64_per_loop = 4;
  const int max_unrolled_fill64 = 8;

  // 64 byte initialization loop.
  vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
  int start64 = 0;
  if (vector64_count > max_unrolled_fill64) {
    Label LOOP;
    Register index = rtmp;

    start64 = vector64_count - (vector64_count % fill64_per_loop);

    movl(index, 0);
    BIND(LOOP);
    for (int i = 0; i < fill64_per_loop; i++) {
      fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector);
    }
    addl(index, fill64_per_loop * 64);
    cmpl(index, start64 * 64);
    jccb(Assembler::less, LOOP);
  }
  for (int i = start64; i < vector64_count; i++) {
    fill64(base, i * 64, xtmp, use64byteVector);
  }

  // Clear remaining 64 byte tail.
  int disp = vector64_count * 64;
  if (cnt) {
    switch (cnt) {
      case 1:
        movq(Address(base, disp), xtmp);
        break;
      case 2:
        evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit);
        break;
      case 3:
        movl(rtmp, 0x7);
        kmovwl(mask, rtmp);
        evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit);
        break;
      case 4:
        evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
        break;
      case 5:
        if (use64byteVector) {
          movl(rtmp, 0x1F);
          kmovwl(mask, rtmp);
          evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
        } else {
          evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
          movq(Address(base, disp + 32), xtmp);
        }
        break;
      case 6:
        if (use64byteVector) {
          movl(rtmp, 0x3F);
          kmovwl(mask, rtmp);
          evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
        } else {
          evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
          evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit);
        }
        break;
      case 7:
        if (use64byteVector) {
          movl(rtmp, 0x7F);
          kmovwl(mask, rtmp);
          evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
        } else {
          evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
          movl(rtmp, 0x7);
          kmovwl(mask, rtmp);
          evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
        }
        break;
      default:
        fatal("Unexpected length : %d\n",cnt);
        break;
    }
  }
}

void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp,
                               bool is_large, KRegister mask) {
  // cnt - number of qwords (8-byte words).
  // base - start address, qword aligned.
  // is_large - if optimizers know cnt is larger than InitArrayShortSize
  assert(base==rdi, "base register must be edi for rep stos");
  assert(tmp==rax, "tmp register must be eax for rep stos");
  assert(cnt==rcx, "cnt register must be ecx for rep stos");
  assert(InitArrayShortSize % BytesPerLong == 0,
         "InitArrayShortSize should be the multiple of BytesPerLong");

  Label DONE;
  if (!is_large || !UseXMMForObjInit) {
    xorptr(tmp, tmp);
  }

  if (!is_large) {
    Label LOOP, LONG;
    cmpptr(cnt, InitArrayShortSize/BytesPerLong);
    jccb(Assembler::greater, LONG);

    NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM

    decrement(cnt);
    jccb(Assembler::negative, DONE); // Zero length

    // Use individual pointer-sized stores for small counts:
    BIND(LOOP);
    movptr(Address(base, cnt, Address::times_ptr), tmp);
    decrement(cnt);
    jccb(Assembler::greaterEqual, LOOP);
    jmpb(DONE);

    BIND(LONG);
  }

  // Use longer rep-prefixed ops for non-small counts:
  if (UseFastStosb) {
    shlptr(cnt, 3); // convert to number of bytes
    rep_stosb();
  } else if (UseXMMForObjInit) {
    xmm_clear_mem(base, cnt, tmp, xtmp, mask);
  } else {
    NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
    rep_stos();
  }

  BIND(DONE);
}

#endif //COMPILER2_OR_JVMCI


void MacroAssembler::generate_fill(BasicType t, bool aligned,
                                   Register to, Register value, Register count,
                                   Register rtmp, XMMRegister xtmp) {
  ShortBranchVerifier sbv(this);
  assert_different_registers(to, value, count, rtmp);
  Label L_exit;
  Label L_fill_2_bytes, L_fill_4_bytes;

#if defined(COMPILER2) && defined(_LP64)
  if(MaxVectorSize >=32 &&
     VM_Version::supports_avx512vlbw() &&
     VM_Version::supports_bmi2()) {
    generate_fill_avx3(t, to, value, count, rtmp, xtmp);
    return;
  }
#endif

  int shift = -1;
  switch (t) {
    case T_BYTE:
      shift = 2;
      break;
    case T_SHORT:
      shift = 1;
      break;
    case T_INT:
      shift = 0;
      break;
    default: ShouldNotReachHere();
  }

  if (t == T_BYTE) {
    andl(value, 0xff);
    movl(rtmp, value);
    shll(rtmp, 8);
    orl(value, rtmp);
  }
  if (t == T_SHORT) {
    andl(value, 0xffff);
  }
  if (t == T_BYTE || t == T_SHORT) {
    movl(rtmp, value);
    shll(rtmp, 16);
    orl(value, rtmp);
  }
  cmpptr(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
  jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
  if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
    Label L_skip_align2;
    // align source address at 4 bytes address boundary
    if (t == T_BYTE) {
      Label L_skip_align1;
      // One byte misalignment happens only for byte arrays
      testptr(to, 1);
      jccb(Assembler::zero, L_skip_align1);
      movb(Address(to, 0), value);
      increment(to);
      decrement(count);
      BIND(L_skip_align1);
    }
    // Two bytes misalignment happens only for byte and short (char) arrays
    testptr(to, 2);
    jccb(Assembler::zero, L_skip_align2);
    movw(Address(to, 0), value);
    addptr(to, 2);
    subptr(count, 1<<(shift-1));
    BIND(L_skip_align2);
  }
  if (UseSSE < 2) {
    Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
    // Fill 32-byte chunks
    subptr(count, 8 << shift);
    jcc(Assembler::less, L_check_fill_8_bytes);
    align(16);

    BIND(L_fill_32_bytes_loop);

    for (int i = 0; i < 32; i += 4) {
      movl(Address(to, i), value);
    }

    addptr(to, 32);
    subptr(count, 8 << shift);
    jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
    BIND(L_check_fill_8_bytes);
    addptr(count, 8 << shift);
    jccb(Assembler::zero, L_exit);
    jmpb(L_fill_8_bytes);

    //
    // length is too short, just fill qwords
    //
    BIND(L_fill_8_bytes_loop);
    movl(Address(to, 0), value);
    movl(Address(to, 4), value);
    addptr(to, 8);
    BIND(L_fill_8_bytes);
    subptr(count, 1 << (shift + 1));
    jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
    // fall through to fill 4 bytes
  } else {
    Label L_fill_32_bytes;
    if (!UseUnalignedLoadStores) {
      // align to 8 bytes, we know we are 4 byte aligned to start
      testptr(to, 4);
      jccb(Assembler::zero, L_fill_32_bytes);
      movl(Address(to, 0), value);
      addptr(to, 4);
      subptr(count, 1<<shift);
    }
    BIND(L_fill_32_bytes);
    {
      assert( UseSSE >= 2, "supported cpu only" );
      Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
      movdl(xtmp, value);
      if (UseAVX >= 2 && UseUnalignedLoadStores) {
        Label L_check_fill_32_bytes;
        if (UseAVX > 2) {
          // Fill 64-byte chunks
          Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;

          // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2
          cmpptr(count, VM_Version::avx3_threshold());
          jccb(Assembler::below, L_check_fill_64_bytes_avx2);

          vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);

          subptr(count, 16 << shift);
          jccb(Assembler::less, L_check_fill_32_bytes);
          align(16);

          BIND(L_fill_64_bytes_loop_avx3);
          evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
          addptr(to, 64);
          subptr(count, 16 << shift);
          jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
          jmpb(L_check_fill_32_bytes);

          BIND(L_check_fill_64_bytes_avx2);
        }
        // Fill 64-byte chunks
        Label L_fill_64_bytes_loop;
        vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);

        subptr(count, 16 << shift);
        jcc(Assembler::less, L_check_fill_32_bytes);
        align(16);

        BIND(L_fill_64_bytes_loop);
        vmovdqu(Address(to, 0), xtmp);
        vmovdqu(Address(to, 32), xtmp);
        addptr(to, 64);
        subptr(count, 16 << shift);
        jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);

        BIND(L_check_fill_32_bytes);
        addptr(count, 8 << shift);
        jccb(Assembler::less, L_check_fill_8_bytes);
        vmovdqu(Address(to, 0), xtmp);
        addptr(to, 32);
        subptr(count, 8 << shift);

        BIND(L_check_fill_8_bytes);
        // clean upper bits of YMM registers
        movdl(xtmp, value);
        pshufd(xtmp, xtmp, 0);
      } else {
        // Fill 32-byte chunks
        pshufd(xtmp, xtmp, 0);

        subptr(count, 8 << shift);
        jcc(Assembler::less, L_check_fill_8_bytes);
        align(16);

        BIND(L_fill_32_bytes_loop);

        if (UseUnalignedLoadStores) {
          movdqu(Address(to, 0), xtmp);
          movdqu(Address(to, 16), xtmp);
        } else {
          movq(Address(to, 0), xtmp);
          movq(Address(to, 8), xtmp);
          movq(Address(to, 16), xtmp);
          movq(Address(to, 24), xtmp);
        }

        addptr(to, 32);
        subptr(count, 8 << shift);
        jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);

        BIND(L_check_fill_8_bytes);
      }
      addptr(count, 8 << shift);
      jccb(Assembler::zero, L_exit);
      jmpb(L_fill_8_bytes);

      //
      // length is too short, just fill qwords
      //
      BIND(L_fill_8_bytes_loop);
      movq(Address(to, 0), xtmp);
      addptr(to, 8);
      BIND(L_fill_8_bytes);
      subptr(count, 1 << (shift + 1));
      jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
    }
  }
  // fill trailing 4 bytes
  BIND(L_fill_4_bytes);
  testl(count, 1<<shift);
  jccb(Assembler::zero, L_fill_2_bytes);
  movl(Address(to, 0), value);
  if (t == T_BYTE || t == T_SHORT) {
    Label L_fill_byte;
    addptr(to, 4);
    BIND(L_fill_2_bytes);
    // fill trailing 2 bytes
    testl(count, 1<<(shift-1));
    jccb(Assembler::zero, L_fill_byte);
    movw(Address(to, 0), value);
    if (t == T_BYTE) {
      addptr(to, 2);
      BIND(L_fill_byte);
      // fill trailing byte
      testl(count, 1);
      jccb(Assembler::zero, L_exit);
      movb(Address(to, 0), value);
    } else {
      BIND(L_fill_byte);
    }
  } else {
    BIND(L_fill_2_bytes);
  }
  BIND(L_exit);
}

void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) {
  switch(type) {
    case T_BYTE:
    case T_BOOLEAN:
      evpbroadcastb(dst, src, vector_len);
      break;
    case T_SHORT:
    case T_CHAR:
      evpbroadcastw(dst, src, vector_len);
      break;
    case T_INT:
    case T_FLOAT:
      evpbroadcastd(dst, src, vector_len);
      break;
    case T_LONG:
    case T_DOUBLE:
      evpbroadcastq(dst, src, vector_len);
      break;
    default:
      fatal("Unhandled type : %s", type2name(type));
      break;
  }
}

// encode char[] to byte[] in ISO_8859_1 or ASCII
//@IntrinsicCandidate
//private static int implEncodeISOArray(byte[] sa, int sp,
//                                      byte[] da, int dp, int len) {
//  int i = 0;
//  for (; i < len; i++) {
//    char c = StringUTF16.getChar(sa, sp++);
//    if (c > '\u00FF')
//      break;
//    da[dp++] = (byte)c;
//  }
//  return i;
//}
//
//@IntrinsicCandidate
//private static int implEncodeAsciiArray(char[] sa, int sp,
//                                        byte[] da, int dp, int len) {
//  int i = 0;
//  for (; i < len; i++) {
//    char c = sa[sp++];
//    if (c >= '\u0080')
//      break;
//    da[dp++] = (byte)c;
//  }
//  return i;
//}
void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
                                      XMMRegister tmp1Reg, XMMRegister tmp2Reg,
                                      XMMRegister tmp3Reg, XMMRegister tmp4Reg,
                                      Register tmp5, Register result, bool ascii) {

  // rsi: src
  // rdi: dst
  // rdx: len
  // rcx: tmp5
  // rax: result
  ShortBranchVerifier sbv(this);
  assert_different_registers(src, dst, len, tmp5, result);
  Label L_done, L_copy_1_char, L_copy_1_char_exit;

  int mask = ascii ? 0xff80ff80 : 0xff00ff00;
  int short_mask = ascii ? 0xff80 : 0xff00;
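  // Mask rationale: each 16-bit char is tested against 0xff00 (ISO-8859-1,
  // trips on chars above 0xFF) or 0xff80 (ASCII, additionally trips on
  // 0x80..0xFF); 'mask' is the same test replicated for the two chars held
  // in each 32-bit lane of the vector compares below.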

  // set result
  xorl(result, result);
  // check for zero length
  testl(len, len);
  jcc(Assembler::zero, L_done);

  movl(result, len);

  // Setup pointers
  lea(src, Address(src, len, Address::times_2)); // char[]
  lea(dst, Address(dst, len, Address::times_1)); // byte[]
  negptr(len);

  if (UseSSE42Intrinsics || UseAVX >= 2) {
    Label L_copy_8_chars, L_copy_8_chars_exit;
    Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;

    if (UseAVX >= 2) {
      Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
      movl(tmp5, mask);   // create mask to test for Unicode or non-ASCII chars in vector
      movdl(tmp1Reg, tmp5);
      vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit);
      jmp(L_chars_32_check);

      bind(L_copy_32_chars);
      vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
      vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
      vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
      vptest(tmp2Reg, tmp1Reg);       // check for Unicode or non-ASCII chars in vector
      jccb(Assembler::notZero, L_copy_32_chars_exit);
      vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
      vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
      vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);

      bind(L_chars_32_check);
      addptr(len, 32);
      jcc(Assembler::lessEqual, L_copy_32_chars);

      bind(L_copy_32_chars_exit);
      subptr(len, 16);
      jccb(Assembler::greater, L_copy_16_chars_exit);

    } else if (UseSSE42Intrinsics) {
      movl(tmp5, mask);   // create mask to test for Unicode or non-ASCII chars in vector
      movdl(tmp1Reg, tmp5);
      pshufd(tmp1Reg, tmp1Reg, 0);
      jmpb(L_chars_16_check);
    }

    bind(L_copy_16_chars);
    if (UseAVX >= 2) {
      vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
      vptest(tmp2Reg, tmp1Reg);
      jcc(Assembler::notZero, L_copy_16_chars_exit);
      vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
      vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
    } else {
      if (UseAVX > 0) {
        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
        vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
      } else {
        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
        por(tmp2Reg, tmp3Reg);
        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
        por(tmp2Reg, tmp4Reg);
      }
      ptest(tmp2Reg, tmp1Reg);       // check for Unicode or non-ASCII chars in vector
      jccb(Assembler::notZero, L_copy_16_chars_exit);
      packuswb(tmp3Reg, tmp4Reg);
    }
    movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);

    bind(L_chars_16_check);
    addptr(len, 16);
    jcc(Assembler::lessEqual, L_copy_16_chars);

    bind(L_copy_16_chars_exit);
    if (UseAVX >= 2) {
      // clean upper bits of YMM registers
      vpxor(tmp2Reg, tmp2Reg);
      vpxor(tmp3Reg, tmp3Reg);
      vpxor(tmp4Reg, tmp4Reg);
      movdl(tmp1Reg, tmp5);
      pshufd(tmp1Reg, tmp1Reg, 0);
    }
    subptr(len, 8);
    jccb(Assembler::greater, L_copy_8_chars_exit);

    bind(L_copy_8_chars);
    movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
    ptest(tmp3Reg, tmp1Reg);
    jccb(Assembler::notZero, L_copy_8_chars_exit);
    packuswb(tmp3Reg, tmp1Reg);
    movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
    addptr(len, 8);
jccb(Assembler::lessEqual, L_copy_8_chars);
6669
6670 bind(L_copy_8_chars_exit);
6671 subptr(len, 8);
6672 jccb(Assembler::zero, L_done);
6673 }
6674
6675 bind(L_copy_1_char);
6676 load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
6677 testl(tmp5, short_mask); // check if Unicode or non-ASCII char
6678 jccb(Assembler::notZero, L_copy_1_char_exit);
6679 movb(Address(dst, len, Address::times_1, 0), tmp5);
6680 addptr(len, 1);
6681 jccb(Assembler::less, L_copy_1_char);
6682
6683 bind(L_copy_1_char_exit);
6684 addptr(result, len); // len holds the negative count of unprocessed elements
6685
6686 bind(L_done);
6687 }
6688
6689 #ifdef _LP64
6690 /**
6691 * Helper for multiply_to_len().
6692 */
6693 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
6694 addq(dest_lo, src1);
6695 adcq(dest_hi, 0);
6696 addq(dest_lo, src2);
6697 adcq(dest_hi, 0);
6698 }
6699
6700 /**
6701 * Multiply 64 bit by 64 bit first loop.
6702 */
6703 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
6704 Register y, Register y_idx, Register z,
6705 Register carry, Register product,
6706 Register idx, Register kdx) {
6707 //
6708 // jlong carry, x[], y[], z[];
6709 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
6710 // huge_128 product = y[idx] * x[xstart] + carry;
6711 // z[kdx] = (jlong)product;
6712 // carry = (jlong)(product >>> 64);
6713 // }
6714 // z[xstart] = carry;
6715 //
6716
6717 Label L_first_loop, L_first_loop_exit;
6718 Label L_one_x, L_one_y, L_multiply;
6719
6720 decrementl(xstart);
6721 jcc(Assembler::negative, L_one_x);
6722
6723 movq(x_xstart, Address(x, xstart, Address::times_4, 0));
6724 rorq(x_xstart, 32); // convert big-endian to little-endian
6725
6726 bind(L_first_loop);
6727 decrementl(idx);
6728 jcc(Assembler::negative, L_first_loop_exit);
6729 decrementl(idx);
6730 jcc(Assembler::negative, L_one_y);
6731 movq(y_idx, Address(y, idx, Address::times_4, 0));
6732 rorq(y_idx, 32); // convert big-endian to little-endian
6733 bind(L_multiply);
6734 movq(product, x_xstart);
6735 mulq(y_idx); // product(rax) * y_idx -> rdx:rax
6736 addq(product, carry);
6737 adcq(rdx, 0);
6738 subl(kdx, 2);
6739 movl(Address(z, kdx, Address::times_4, 4), product);
6740 shrq(product, 32);
6741 movl(Address(z, kdx, Address::times_4, 0), product);
6742 movq(carry, rdx);
6743 jmp(L_first_loop);
6744
6745 bind(L_one_y);
6746 movl(y_idx, Address(y, 0));
6747 jmp(L_multiply);
6748
6749 bind(L_one_x);
6750 movl(x_xstart, Address(x, 0));
6751 jmp(L_first_loop);
6752
6753 bind(L_first_loop_exit);
6754 }
6755
6756 /**
6757 * Multiply 64 bit by 64 bit and add 128 bit.
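*
* Editorial sketch (not compiled): the scalar operation the routine below
* emits, written in C++ with unsigned __int128 standing in for huge_128:
*
*   unsigned __int128 product = (unsigned __int128)y_idx * x_xstart
*                             + z_kdx + carry;
*   z_kdx = (uint64_t)product;           // low 64 bits written back to z[]
*   carry = (uint64_t)(product >> 64);   // high 64 bits become the new carry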
6758 */ 6759 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z, 6760 Register yz_idx, Register idx, 6761 Register carry, Register product, int offset) { 6762 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry; 6763 // z[kdx] = (jlong)product; 6764 6765 movq(yz_idx, Address(y, idx, Address::times_4, offset)); 6766 rorq(yz_idx, 32); // convert big-endian to little-endian 6767 movq(product, x_xstart); 6768 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 6769 movq(yz_idx, Address(z, idx, Address::times_4, offset)); 6770 rorq(yz_idx, 32); // convert big-endian to little-endian 6771 6772 add2_with_carry(rdx, product, carry, yz_idx); 6773 6774 movl(Address(z, idx, Address::times_4, offset+4), product); 6775 shrq(product, 32); 6776 movl(Address(z, idx, Address::times_4, offset), product); 6777 6778 } 6779 6780 /** 6781 * Multiply 128 bit by 128 bit. Unrolled inner loop. 6782 */ 6783 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 6784 Register yz_idx, Register idx, Register jdx, 6785 Register carry, Register product, 6786 Register carry2) { 6787 // jlong carry, x[], y[], z[]; 6788 // int kdx = ystart+1; 6789 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 6790 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry; 6791 // z[kdx+idx+1] = (jlong)product; 6792 // jlong carry2 = (jlong)(product >>> 64); 6793 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2; 6794 // z[kdx+idx] = (jlong)product; 6795 // carry = (jlong)(product >>> 64); 6796 // } 6797 // idx += 2; 6798 // if (idx > 0) { 6799 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry; 6800 // z[kdx+idx] = (jlong)product; 6801 // carry = (jlong)(product >>> 64); 6802 // } 6803 // 6804 6805 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 6806 6807 movl(jdx, idx); 6808 andl(jdx, 0xFFFFFFFC); 6809 shrl(jdx, 2); 6810 6811 bind(L_third_loop); 6812 subl(jdx, 1); 6813 jcc(Assembler::negative, L_third_loop_exit); 6814 subl(idx, 4); 6815 6816 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8); 6817 movq(carry2, rdx); 6818 6819 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0); 6820 movq(carry, rdx); 6821 jmp(L_third_loop); 6822 6823 bind (L_third_loop_exit); 6824 6825 andl (idx, 0x3); 6826 jcc(Assembler::zero, L_post_third_loop_done); 6827 6828 Label L_check_1; 6829 subl(idx, 2); 6830 jcc(Assembler::negative, L_check_1); 6831 6832 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0); 6833 movq(carry, rdx); 6834 6835 bind (L_check_1); 6836 addl (idx, 0x2); 6837 andl (idx, 0x1); 6838 subl(idx, 1); 6839 jcc(Assembler::negative, L_post_third_loop_done); 6840 6841 movl(yz_idx, Address(y, idx, Address::times_4, 0)); 6842 movq(product, x_xstart); 6843 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 6844 movl(yz_idx, Address(z, idx, Address::times_4, 0)); 6845 6846 add2_with_carry(rdx, product, yz_idx, carry); 6847 6848 movl(Address(z, idx, Address::times_4, 0), product); 6849 shrq(product, 32); 6850 6851 shlq(rdx, 32); 6852 orq(product, rdx); 6853 movq(carry, product); 6854 6855 bind(L_post_third_loop_done); 6856 } 6857 6858 /** 6859 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop. 
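*
* Editorial note: MULX does a 64x64->128 multiply without touching the flags,
* so on ADX hardware ADCX/ADOX can drive two independent carry chains through
* the unrolled body. A rough C++ sketch of one add2_with_carry step:
*
*   unsigned __int128 t = (unsigned __int128)y1 * x;  // mulxq -> hi:lo
*   uint64_t lo = (uint64_t)t, hi = (uint64_t)(t >> 64);
*   lo += carry;  hi += (lo < carry);                 // first carry chain
*   lo += z1;     hi += (lo < z1);                    // second carry chain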
6860 * 6861 */ 6862 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z, 6863 Register carry, Register carry2, 6864 Register idx, Register jdx, 6865 Register yz_idx1, Register yz_idx2, 6866 Register tmp, Register tmp3, Register tmp4) { 6867 assert(UseBMI2Instructions, "should be used only when BMI2 is available"); 6868 6869 // jlong carry, x[], y[], z[]; 6870 // int kdx = ystart+1; 6871 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 6872 // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry; 6873 // jlong carry2 = (jlong)(tmp3 >>> 64); 6874 // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2; 6875 // carry = (jlong)(tmp4 >>> 64); 6876 // z[kdx+idx+1] = (jlong)tmp3; 6877 // z[kdx+idx] = (jlong)tmp4; 6878 // } 6879 // idx += 2; 6880 // if (idx > 0) { 6881 // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry; 6882 // z[kdx+idx] = (jlong)yz_idx1; 6883 // carry = (jlong)(yz_idx1 >>> 64); 6884 // } 6885 // 6886 6887 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 6888 6889 movl(jdx, idx); 6890 andl(jdx, 0xFFFFFFFC); 6891 shrl(jdx, 2); 6892 6893 bind(L_third_loop); 6894 subl(jdx, 1); 6895 jcc(Assembler::negative, L_third_loop_exit); 6896 subl(idx, 4); 6897 6898 movq(yz_idx1, Address(y, idx, Address::times_4, 8)); 6899 rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 6900 movq(yz_idx2, Address(y, idx, Address::times_4, 0)); 6901 rorxq(yz_idx2, yz_idx2, 32); 6902 6903 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 6904 mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp 6905 6906 movq(yz_idx1, Address(z, idx, Address::times_4, 8)); 6907 rorxq(yz_idx1, yz_idx1, 32); 6908 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 6909 rorxq(yz_idx2, yz_idx2, 32); 6910 6911 if (VM_Version::supports_adx()) { 6912 adcxq(tmp3, carry); 6913 adoxq(tmp3, yz_idx1); 6914 6915 adcxq(tmp4, tmp); 6916 adoxq(tmp4, yz_idx2); 6917 6918 movl(carry, 0); // does not affect flags 6919 adcxq(carry2, carry); 6920 adoxq(carry2, carry); 6921 } else { 6922 add2_with_carry(tmp4, tmp3, carry, yz_idx1); 6923 add2_with_carry(carry2, tmp4, tmp, yz_idx2); 6924 } 6925 movq(carry, carry2); 6926 6927 movl(Address(z, idx, Address::times_4, 12), tmp3); 6928 shrq(tmp3, 32); 6929 movl(Address(z, idx, Address::times_4, 8), tmp3); 6930 6931 movl(Address(z, idx, Address::times_4, 4), tmp4); 6932 shrq(tmp4, 32); 6933 movl(Address(z, idx, Address::times_4, 0), tmp4); 6934 6935 jmp(L_third_loop); 6936 6937 bind (L_third_loop_exit); 6938 6939 andl (idx, 0x3); 6940 jcc(Assembler::zero, L_post_third_loop_done); 6941 6942 Label L_check_1; 6943 subl(idx, 2); 6944 jcc(Assembler::negative, L_check_1); 6945 6946 movq(yz_idx1, Address(y, idx, Address::times_4, 0)); 6947 rorxq(yz_idx1, yz_idx1, 32); 6948 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 6949 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 6950 rorxq(yz_idx2, yz_idx2, 32); 6951 6952 add2_with_carry(tmp4, tmp3, carry, yz_idx2); 6953 6954 movl(Address(z, idx, Address::times_4, 4), tmp3); 6955 shrq(tmp3, 32); 6956 movl(Address(z, idx, Address::times_4, 0), tmp3); 6957 movq(carry, tmp4); 6958 6959 bind (L_check_1); 6960 addl (idx, 0x2); 6961 andl (idx, 0x1); 6962 subl(idx, 1); 6963 jcc(Assembler::negative, L_post_third_loop_done); 6964 movl(tmp4, Address(y, idx, Address::times_4, 0)); 6965 mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3 6966 movl(tmp4, Address(z, idx, Address::times_4, 0)); 6967 6968 add2_with_carry(carry2, tmp3, tmp4, carry); 6969 6970 movl(Address(z, idx, 
Address::times_4, 0), tmp3);
6971 shrq(tmp3, 32);
6972
6973 shlq(carry2, 32);
6974 orq(tmp3, carry2);
6975 movq(carry, tmp3);
6976
6977 bind(L_post_third_loop_done);
6978 }
6979
6980 /**
6981 * Code for BigInteger::multiplyToLen() intrinsic.
6982 *
6983 * rdi: x
6984 * rax: xlen
6985 * rsi: y
6986 * rcx: ylen
6987 * r8: z
6988 * r11: zlen
6989 * r12: tmp1
6990 * r13: tmp2
6991 * r14: tmp3
6992 * r15: tmp4
6993 * rbx: tmp5
6994 *
6995 */
6996 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
6997 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
6998 ShortBranchVerifier sbv(this);
6999 assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
7000
7001 push(tmp1);
7002 push(tmp2);
7003 push(tmp3);
7004 push(tmp4);
7005 push(tmp5);
7006
7007 push(xlen);
7008 push(zlen);
7009
7010 const Register idx = tmp1;
7011 const Register kdx = tmp2;
7012 const Register xstart = tmp3;
7013
7014 const Register y_idx = tmp4;
7015 const Register carry = tmp5;
7016 const Register product = xlen;
7017 const Register x_xstart = zlen; // reuse register
7018
7019 // First Loop.
7020 //
7021 // final static long LONG_MASK = 0xffffffffL;
7022 // int xstart = xlen - 1;
7023 // int ystart = ylen - 1;
7024 // long carry = 0;
7025 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7026 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
7027 // z[kdx] = (int)product;
7028 // carry = product >>> 32;
7029 // }
7030 // z[xstart] = (int)carry;
7031 //
7032
7033 movl(idx, ylen); // idx = ylen;
7034 movl(kdx, zlen); // kdx = xlen+ylen;
7035 xorq(carry, carry); // carry = 0;
7036
7037 Label L_done;
7038
7039 movl(xstart, xlen);
7040 decrementl(xstart);
7041 jcc(Assembler::negative, L_done);
7042
7043 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
7044
7045 Label L_second_loop;
7046 testl(kdx, kdx);
7047 jcc(Assembler::zero, L_second_loop);
7048
7049 Label L_carry;
7050 subl(kdx, 1);
7051 jcc(Assembler::zero, L_carry);
7052
7053 movl(Address(z, kdx, Address::times_4, 0), carry);
7054 shrq(carry, 32);
7055 subl(kdx, 1);
7056
7057 bind(L_carry);
7058 movl(Address(z, kdx, Address::times_4, 0), carry);
7059
7060 // Second and third (nested) loops.
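// (Editorial note) This is the schoolbook O(xlen * ylen) pass: each remaining
// 64-bit digit of x is multiplied against all of y and accumulated into z
// with carry propagation, as the pseudocode below spells out.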
7061 // 7062 // for (int i = xstart-1; i >= 0; i--) { // Second loop 7063 // carry = 0; 7064 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 7065 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 7066 // (z[k] & LONG_MASK) + carry; 7067 // z[k] = (int)product; 7068 // carry = product >>> 32; 7069 // } 7070 // z[i] = (int)carry; 7071 // } 7072 // 7073 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx 7074 7075 const Register jdx = tmp1; 7076 7077 bind(L_second_loop); 7078 xorl(carry, carry); // carry = 0; 7079 movl(jdx, ylen); // j = ystart+1 7080 7081 subl(xstart, 1); // i = xstart-1; 7082 jcc(Assembler::negative, L_done); 7083 7084 push (z); 7085 7086 Label L_last_x; 7087 lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j 7088 subl(xstart, 1); // i = xstart-1; 7089 jcc(Assembler::negative, L_last_x); 7090 7091 if (UseBMI2Instructions) { 7092 movq(rdx, Address(x, xstart, Address::times_4, 0)); 7093 rorxq(rdx, rdx, 32); // convert big-endian to little-endian 7094 } else { 7095 movq(x_xstart, Address(x, xstart, Address::times_4, 0)); 7096 rorq(x_xstart, 32); // convert big-endian to little-endian 7097 } 7098 7099 Label L_third_loop_prologue; 7100 bind(L_third_loop_prologue); 7101 7102 push (x); 7103 push (xstart); 7104 push (ylen); 7105 7106 7107 if (UseBMI2Instructions) { 7108 multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4); 7109 } else { // !UseBMI2Instructions 7110 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x); 7111 } 7112 7113 pop(ylen); 7114 pop(xlen); 7115 pop(x); 7116 pop(z); 7117 7118 movl(tmp3, xlen); 7119 addl(tmp3, 1); 7120 movl(Address(z, tmp3, Address::times_4, 0), carry); 7121 subl(tmp3, 1); 7122 jccb(Assembler::negative, L_done); 7123 7124 shrq(carry, 32); 7125 movl(Address(z, tmp3, Address::times_4, 0), carry); 7126 jmp(L_second_loop); 7127 7128 // Next infrequent code is moved outside loops. 7129 bind(L_last_x); 7130 if (UseBMI2Instructions) { 7131 movl(rdx, Address(x, 0)); 7132 } else { 7133 movl(x_xstart, Address(x, 0)); 7134 } 7135 jmp(L_third_loop_prologue); 7136 7137 bind(L_done); 7138 7139 pop(zlen); 7140 pop(xlen); 7141 7142 pop(tmp5); 7143 pop(tmp4); 7144 pop(tmp3); 7145 pop(tmp2); 7146 pop(tmp1); 7147 } 7148 7149 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 7150 Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){ 7151 assert(UseSSE42Intrinsics, "SSE4.2 must be enabled."); 7152 Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP; 7153 Label VECTOR8_TAIL, VECTOR4_TAIL; 7154 Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL; 7155 Label SAME_TILL_END, DONE; 7156 Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL; 7157 7158 //scale is in rcx in both Win64 and Unix 7159 ShortBranchVerifier sbv(this); 7160 7161 shlq(length); 7162 xorq(result, result); 7163 7164 if ((AVX3Threshold == 0) && (UseAVX > 2) && 7165 VM_Version::supports_avx512vlbw()) { 7166 Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL; 7167 7168 cmpq(length, 64); 7169 jcc(Assembler::less, VECTOR32_TAIL); 7170 7171 movq(tmp1, length); 7172 andq(tmp1, 0x3F); // tail count 7173 andq(length, ~(0x3F)); //vector count 7174 7175 bind(VECTOR64_LOOP); 7176 // AVX512 code to compare 64 byte vectors. 
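// (Editorial sketch, not compiled) In scalar terms each iteration asks
//   if (memcmp(obja + result, objb + result, 64) != 0) goto mismatch;
//   result += 64; length -= 64;
// evpcmpeqb sets one bit in k7 per equal byte; kortestql sets the carry flag
// only when all 64 mask bits are set, so carryClear signals a mismatch.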
7177 evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit); 7178 evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit); 7179 kortestql(k7, k7); 7180 jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch 7181 addq(result, 64); 7182 subq(length, 64); 7183 jccb(Assembler::notZero, VECTOR64_LOOP); 7184 7185 //bind(VECTOR64_TAIL); 7186 testq(tmp1, tmp1); 7187 jcc(Assembler::zero, SAME_TILL_END); 7188 7189 //bind(VECTOR64_TAIL); 7190 // AVX512 code to compare up to 63 byte vectors. 7191 mov64(tmp2, 0xFFFFFFFFFFFFFFFF); 7192 shlxq(tmp2, tmp2, tmp1); 7193 notq(tmp2); 7194 kmovql(k3, tmp2); 7195 7196 evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit); 7197 evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit); 7198 7199 ktestql(k7, k3); 7200 jcc(Assembler::below, SAME_TILL_END); // not mismatch 7201 7202 bind(VECTOR64_NOT_EQUAL); 7203 kmovql(tmp1, k7); 7204 notq(tmp1); 7205 tzcntq(tmp1, tmp1); 7206 addq(result, tmp1); 7207 shrq(result); 7208 jmp(DONE); 7209 bind(VECTOR32_TAIL); 7210 } 7211 7212 cmpq(length, 8); 7213 jcc(Assembler::equal, VECTOR8_LOOP); 7214 jcc(Assembler::less, VECTOR4_TAIL); 7215 7216 if (UseAVX >= 2) { 7217 Label VECTOR16_TAIL, VECTOR32_LOOP; 7218 7219 cmpq(length, 16); 7220 jcc(Assembler::equal, VECTOR16_LOOP); 7221 jcc(Assembler::less, VECTOR8_LOOP); 7222 7223 cmpq(length, 32); 7224 jccb(Assembler::less, VECTOR16_TAIL); 7225 7226 subq(length, 32); 7227 bind(VECTOR32_LOOP); 7228 vmovdqu(rymm0, Address(obja, result)); 7229 vmovdqu(rymm1, Address(objb, result)); 7230 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit); 7231 vptest(rymm2, rymm2); 7232 jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found 7233 addq(result, 32); 7234 subq(length, 32); 7235 jcc(Assembler::greaterEqual, VECTOR32_LOOP); 7236 addq(length, 32); 7237 jcc(Assembler::equal, SAME_TILL_END); 7238 //falling through if less than 32 bytes left //close the branch here. 
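// (Editorial note) From here the mismatch search cascades through smaller
// strides; a scalar sketch of the remaining control flow:
//   while (length >= 16) compare 16 bytes;   // VECTOR16_LOOP
//   if (length >= 8)     compare 8 bytes;    // VECTOR8_LOOP
//   if (length >= 4)     compare 4 bytes;    // VECTOR4_LOOP
//   compare the last 1-3 bytes one at a time // BYTES_LOOP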
7239 7240 bind(VECTOR16_TAIL); 7241 cmpq(length, 16); 7242 jccb(Assembler::less, VECTOR8_TAIL); 7243 bind(VECTOR16_LOOP); 7244 movdqu(rymm0, Address(obja, result)); 7245 movdqu(rymm1, Address(objb, result)); 7246 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit); 7247 ptest(rymm2, rymm2); 7248 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 7249 addq(result, 16); 7250 subq(length, 16); 7251 jcc(Assembler::equal, SAME_TILL_END); 7252 //falling through if less than 16 bytes left 7253 } else {//regular intrinsics 7254 7255 cmpq(length, 16); 7256 jccb(Assembler::less, VECTOR8_TAIL); 7257 7258 subq(length, 16); 7259 bind(VECTOR16_LOOP); 7260 movdqu(rymm0, Address(obja, result)); 7261 movdqu(rymm1, Address(objb, result)); 7262 pxor(rymm0, rymm1); 7263 ptest(rymm0, rymm0); 7264 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 7265 addq(result, 16); 7266 subq(length, 16); 7267 jccb(Assembler::greaterEqual, VECTOR16_LOOP); 7268 addq(length, 16); 7269 jcc(Assembler::equal, SAME_TILL_END); 7270 //falling through if less than 16 bytes left 7271 } 7272 7273 bind(VECTOR8_TAIL); 7274 cmpq(length, 8); 7275 jccb(Assembler::less, VECTOR4_TAIL); 7276 bind(VECTOR8_LOOP); 7277 movq(tmp1, Address(obja, result)); 7278 movq(tmp2, Address(objb, result)); 7279 xorq(tmp1, tmp2); 7280 testq(tmp1, tmp1); 7281 jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found 7282 addq(result, 8); 7283 subq(length, 8); 7284 jcc(Assembler::equal, SAME_TILL_END); 7285 //falling through if less than 8 bytes left 7286 7287 bind(VECTOR4_TAIL); 7288 cmpq(length, 4); 7289 jccb(Assembler::less, BYTES_TAIL); 7290 bind(VECTOR4_LOOP); 7291 movl(tmp1, Address(obja, result)); 7292 xorl(tmp1, Address(objb, result)); 7293 testl(tmp1, tmp1); 7294 jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found 7295 addq(result, 4); 7296 subq(length, 4); 7297 jcc(Assembler::equal, SAME_TILL_END); 7298 //falling through if less than 4 bytes left 7299 7300 bind(BYTES_TAIL); 7301 bind(BYTES_LOOP); 7302 load_unsigned_byte(tmp1, Address(obja, result)); 7303 load_unsigned_byte(tmp2, Address(objb, result)); 7304 xorl(tmp1, tmp2); 7305 testl(tmp1, tmp1); 7306 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 7307 decq(length); 7308 jcc(Assembler::zero, SAME_TILL_END); 7309 incq(result); 7310 load_unsigned_byte(tmp1, Address(obja, result)); 7311 load_unsigned_byte(tmp2, Address(objb, result)); 7312 xorl(tmp1, tmp2); 7313 testl(tmp1, tmp1); 7314 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 7315 decq(length); 7316 jcc(Assembler::zero, SAME_TILL_END); 7317 incq(result); 7318 load_unsigned_byte(tmp1, Address(obja, result)); 7319 load_unsigned_byte(tmp2, Address(objb, result)); 7320 xorl(tmp1, tmp2); 7321 testl(tmp1, tmp1); 7322 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 7323 jmp(SAME_TILL_END); 7324 7325 if (UseAVX >= 2) { 7326 bind(VECTOR32_NOT_EQUAL); 7327 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit); 7328 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit); 7329 vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit); 7330 vpmovmskb(tmp1, rymm0); 7331 bsfq(tmp1, tmp1); 7332 addq(result, tmp1); 7333 shrq(result); 7334 jmp(DONE); 7335 } 7336 7337 bind(VECTOR16_NOT_EQUAL); 7338 if (UseAVX >= 2) { 7339 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit); 7340 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit); 7341 pxor(rymm0, rymm2); 7342 } else { 7343 pcmpeqb(rymm2, rymm2); 7344 pxor(rymm0, rymm1); 7345 pcmpeqb(rymm0, rymm1); 7346 pxor(rymm0, rymm2); 7347 } 7348 pmovmskb(tmp1, rymm0); 7349 
bsfq(tmp1, tmp1); 7350 addq(result, tmp1); 7351 shrq(result); 7352 jmpb(DONE); 7353 7354 bind(VECTOR8_NOT_EQUAL); 7355 bind(VECTOR4_NOT_EQUAL); 7356 bsfq(tmp1, tmp1); 7357 shrq(tmp1, 3); 7358 addq(result, tmp1); 7359 bind(BYTES_NOT_EQUAL); 7360 shrq(result); 7361 jmpb(DONE); 7362 7363 bind(SAME_TILL_END); 7364 mov64(result, -1); 7365 7366 bind(DONE); 7367 } 7368 7369 //Helper functions for square_to_len() 7370 7371 /** 7372 * Store the squares of x[], right shifted one bit (divided by 2) into z[] 7373 * Preserves x and z and modifies rest of the registers. 7374 */ 7375 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7376 // Perform square and right shift by 1 7377 // Handle odd xlen case first, then for even xlen do the following 7378 // jlong carry = 0; 7379 // for (int j=0, i=0; j < xlen; j+=2, i+=4) { 7380 // huge_128 product = x[j:j+1] * x[j:j+1]; 7381 // z[i:i+1] = (carry << 63) | (jlong)(product >>> 65); 7382 // z[i+2:i+3] = (jlong)(product >>> 1); 7383 // carry = (jlong)product; 7384 // } 7385 7386 xorq(tmp5, tmp5); // carry 7387 xorq(rdxReg, rdxReg); 7388 xorl(tmp1, tmp1); // index for x 7389 xorl(tmp4, tmp4); // index for z 7390 7391 Label L_first_loop, L_first_loop_exit; 7392 7393 testl(xlen, 1); 7394 jccb(Assembler::zero, L_first_loop); //jump if xlen is even 7395 7396 // Square and right shift by 1 the odd element using 32 bit multiply 7397 movl(raxReg, Address(x, tmp1, Address::times_4, 0)); 7398 imulq(raxReg, raxReg); 7399 shrq(raxReg, 1); 7400 adcq(tmp5, 0); 7401 movq(Address(z, tmp4, Address::times_4, 0), raxReg); 7402 incrementl(tmp1); 7403 addl(tmp4, 2); 7404 7405 // Square and right shift by 1 the rest using 64 bit multiply 7406 bind(L_first_loop); 7407 cmpptr(tmp1, xlen); 7408 jccb(Assembler::equal, L_first_loop_exit); 7409 7410 // Square 7411 movq(raxReg, Address(x, tmp1, Address::times_4, 0)); 7412 rorq(raxReg, 32); // convert big-endian to little-endian 7413 mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax 7414 7415 // Right shift by 1 and save carry 7416 shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1 7417 rcrq(rdxReg, 1); 7418 rcrq(raxReg, 1); 7419 adcq(tmp5, 0); 7420 7421 // Store result in z 7422 movq(Address(z, tmp4, Address::times_4, 0), rdxReg); 7423 movq(Address(z, tmp4, Address::times_4, 8), raxReg); 7424 7425 // Update indices for x and z 7426 addl(tmp1, 2); 7427 addl(tmp4, 4); 7428 jmp(L_first_loop); 7429 7430 bind(L_first_loop_exit); 7431 } 7432 7433 7434 /** 7435 * Perform the following multiply add operation using BMI2 instructions 7436 * carry:sum = sum + op1*op2 + carry 7437 * op2 should be in rdx 7438 * op2 is preserved, all other registers are modified 7439 */ 7440 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) { 7441 // assert op2 is rdx 7442 mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1 7443 addq(sum, carry); 7444 adcq(tmp2, 0); 7445 addq(sum, op1); 7446 adcq(tmp2, 0); 7447 movq(carry, tmp2); 7448 } 7449 7450 /** 7451 * Perform the following multiply add operation: 7452 * carry:sum = sum + op1*op2 + carry 7453 * Preserves op1, op2 and modifies rest of registers 7454 */ 7455 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) { 7456 // rdx:rax = op1 * op2 7457 movq(raxReg, op2); 7458 mulq(op1); 7459 7460 // rdx:rax = sum + carry + rdx:rax 7461 addq(sum, carry); 7462 
adcq(rdxReg, 0); 7463 addq(sum, raxReg); 7464 adcq(rdxReg, 0); 7465 7466 // carry:sum = rdx:sum 7467 movq(carry, rdxReg); 7468 } 7469 7470 /** 7471 * Add 64 bit long carry into z[] with carry propagation. 7472 * Preserves z and carry register values and modifies rest of registers. 7473 * 7474 */ 7475 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) { 7476 Label L_fourth_loop, L_fourth_loop_exit; 7477 7478 movl(tmp1, 1); 7479 subl(zlen, 2); 7480 addq(Address(z, zlen, Address::times_4, 0), carry); 7481 7482 bind(L_fourth_loop); 7483 jccb(Assembler::carryClear, L_fourth_loop_exit); 7484 subl(zlen, 2); 7485 jccb(Assembler::negative, L_fourth_loop_exit); 7486 addq(Address(z, zlen, Address::times_4, 0), tmp1); 7487 jmp(L_fourth_loop); 7488 bind(L_fourth_loop_exit); 7489 } 7490 7491 /** 7492 * Shift z[] left by 1 bit. 7493 * Preserves x, len, z and zlen registers and modifies rest of the registers. 7494 * 7495 */ 7496 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) { 7497 7498 Label L_fifth_loop, L_fifth_loop_exit; 7499 7500 // Fifth loop 7501 // Perform primitiveLeftShift(z, zlen, 1) 7502 7503 const Register prev_carry = tmp1; 7504 const Register new_carry = tmp4; 7505 const Register value = tmp2; 7506 const Register zidx = tmp3; 7507 7508 // int zidx, carry; 7509 // long value; 7510 // carry = 0; 7511 // for (zidx = zlen-2; zidx >=0; zidx -= 2) { 7512 // (carry:value) = (z[i] << 1) | carry ; 7513 // z[i] = value; 7514 // } 7515 7516 movl(zidx, zlen); 7517 xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register 7518 7519 bind(L_fifth_loop); 7520 decl(zidx); // Use decl to preserve carry flag 7521 decl(zidx); 7522 jccb(Assembler::negative, L_fifth_loop_exit); 7523 7524 if (UseBMI2Instructions) { 7525 movq(value, Address(z, zidx, Address::times_4, 0)); 7526 rclq(value, 1); 7527 rorxq(value, value, 32); 7528 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 7529 } 7530 else { 7531 // clear new_carry 7532 xorl(new_carry, new_carry); 7533 7534 // Shift z[i] by 1, or in previous carry and save new carry 7535 movq(value, Address(z, zidx, Address::times_4, 0)); 7536 shlq(value, 1); 7537 adcl(new_carry, 0); 7538 7539 orq(value, prev_carry); 7540 rorq(value, 0x20); 7541 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 7542 7543 // Set previous carry = new carry 7544 movl(prev_carry, new_carry); 7545 } 7546 jmp(L_fifth_loop); 7547 7548 bind(L_fifth_loop_exit); 7549 } 7550 7551 7552 /** 7553 * Code for BigInteger::squareToLen() intrinsic 7554 * 7555 * rdi: x 7556 * rsi: len 7557 * r8: z 7558 * rcx: zlen 7559 * r12: tmp1 7560 * r13: tmp2 7561 * r14: tmp3 7562 * r15: tmp4 7563 * rbx: tmp5 7564 * 7565 */ 7566 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7567 7568 Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply; 7569 push(tmp1); 7570 push(tmp2); 7571 push(tmp3); 7572 push(tmp4); 7573 push(tmp5); 7574 7575 // First loop 7576 // Store the squares, right shifted one bit (i.e., divided by 2). 7577 square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg); 7578 7579 // Add in off-diagonal sums. 7580 // 7581 // Second, third (nested) and fourth loops. 
7582 // zlen += 2;
7583 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
7584 // carry = 0;
7585 // long op2 = x[xidx:xidx+1];
7586 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
7587 // k -= 2;
7588 // long op1 = x[j:j+1];
7589 // long sum = z[k:k+1];
7590 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
7591 // z[k:k+1] = sum;
7592 // }
7593 // add_one_64(z, k, carry, tmp_regs);
7594 // }
7595
7596 const Register carry = tmp5;
7597 const Register sum = tmp3;
7598 const Register op1 = tmp4;
7599 Register op2 = tmp2;
7600
7601 push(zlen);
7602 push(len);
7603 addl(zlen,2);
7604 bind(L_second_loop);
7605 xorq(carry, carry);
7606 subl(zlen, 4);
7607 subl(len, 2);
7608 push(zlen);
7609 push(len);
7610 cmpl(len, 0);
7611 jccb(Assembler::lessEqual, L_second_loop_exit);
7612
7613 // Multiply an array by one 64 bit long.
7614 if (UseBMI2Instructions) {
7615 op2 = rdxReg;
7616 movq(op2, Address(x, len, Address::times_4, 0));
7617 rorxq(op2, op2, 32);
7618 }
7619 else {
7620 movq(op2, Address(x, len, Address::times_4, 0));
7621 rorq(op2, 32);
7622 }
7623
7624 bind(L_third_loop);
7625 decrementl(len);
7626 jccb(Assembler::negative, L_third_loop_exit);
7627 decrementl(len);
7628 jccb(Assembler::negative, L_last_x);
7629
7630 movq(op1, Address(x, len, Address::times_4, 0));
7631 rorq(op1, 32);
7632
7633 bind(L_multiply);
7634 subl(zlen, 2);
7635 movq(sum, Address(z, zlen, Address::times_4, 0));
7636
7637 // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry.
7638 if (UseBMI2Instructions) {
7639 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
7640 }
7641 else {
7642 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
7643 }
7644
7645 movq(Address(z, zlen, Address::times_4, 0), sum);
7646
7647 jmp(L_third_loop);
7648 bind(L_third_loop_exit);
7649
7650 // Fourth loop
7651 // Add 64 bit long carry into z with carry propagation.
7652 // Uses the adjusted zlen.
7653 add_one_64(z, zlen, carry, tmp1);
7654
7655 pop(len);
7656 pop(zlen);
7657 jmp(L_second_loop);
7658
7659 // Next infrequent code is moved outside loops.
7660 bind(L_last_x);
7661 movl(op1, Address(x, 0));
7662 jmp(L_multiply);
7663
7664 bind(L_second_loop_exit);
7665 pop(len);
7666 pop(zlen);
7667 pop(len);
7668 pop(zlen);
7669
7670 // Fifth loop
7671 // Shift z left 1 bit.
7672 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);
7673
7674 // z[zlen-1] |= x[len-1] & 1;
7675 movl(tmp3, Address(x, len, Address::times_4, -4));
7676 andl(tmp3, 1);
7677 orl(Address(z, zlen, Address::times_4, -4), tmp3);
7678
7679 pop(tmp5);
7680 pop(tmp4);
7681 pop(tmp3);
7682 pop(tmp2);
7683 pop(tmp1);
7684 }
7685
7686 /**
7687 * Helper function for mul_add()
7688 * Multiply the in[] by int k and add to out[] starting at offset offs using
7689 * 128 bit by 32 bit multiply and return the carry in tmp5.
7690 * Only the quad-int-aligned portion of in[] is processed in this function.
7691 * k is in rdxReg when BMI2 instructions are used; otherwise it is in tmp2.
7692 * This function preserves out, in and k registers.
7693 * len and offset point to the appropriate indices in "in" and "out" respectively.
7694 * tmp5 has the carry.
7695 * other registers are temporary and are modified.
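*
* Editorial sketch (not compiled) of what each unrolled step computes per
* 64-bit half, with unsigned __int128 as the wide type:
*
*   unsigned __int128 p = (unsigned __int128)op1 * k + sum + carry;
*   sum   = (uint64_t)p;          // written back into out[]
*   carry = (uint64_t)(p >> 64);  // propagated in tmp5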
7696 * 7697 */ 7698 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in, 7699 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3, 7700 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7701 7702 Label L_first_loop, L_first_loop_exit; 7703 7704 movl(tmp1, len); 7705 shrl(tmp1, 2); 7706 7707 bind(L_first_loop); 7708 subl(tmp1, 1); 7709 jccb(Assembler::negative, L_first_loop_exit); 7710 7711 subl(len, 4); 7712 subl(offset, 4); 7713 7714 Register op2 = tmp2; 7715 const Register sum = tmp3; 7716 const Register op1 = tmp4; 7717 const Register carry = tmp5; 7718 7719 if (UseBMI2Instructions) { 7720 op2 = rdxReg; 7721 } 7722 7723 movq(op1, Address(in, len, Address::times_4, 8)); 7724 rorq(op1, 32); 7725 movq(sum, Address(out, offset, Address::times_4, 8)); 7726 rorq(sum, 32); 7727 if (UseBMI2Instructions) { 7728 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7729 } 7730 else { 7731 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7732 } 7733 // Store back in big endian from little endian 7734 rorq(sum, 0x20); 7735 movq(Address(out, offset, Address::times_4, 8), sum); 7736 7737 movq(op1, Address(in, len, Address::times_4, 0)); 7738 rorq(op1, 32); 7739 movq(sum, Address(out, offset, Address::times_4, 0)); 7740 rorq(sum, 32); 7741 if (UseBMI2Instructions) { 7742 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7743 } 7744 else { 7745 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7746 } 7747 // Store back in big endian from little endian 7748 rorq(sum, 0x20); 7749 movq(Address(out, offset, Address::times_4, 0), sum); 7750 7751 jmp(L_first_loop); 7752 bind(L_first_loop_exit); 7753 } 7754 7755 /** 7756 * Code for BigInteger::mulAdd() intrinsic 7757 * 7758 * rdi: out 7759 * rsi: in 7760 * r11: offs (out.length - offset) 7761 * rcx: len 7762 * r8: k 7763 * r12: tmp1 7764 * r13: tmp2 7765 * r14: tmp3 7766 * r15: tmp4 7767 * rbx: tmp5 7768 * Multiply the in[] by word k and add to out[], return the carry in rax 7769 */ 7770 void MacroAssembler::mul_add(Register out, Register in, Register offs, 7771 Register len, Register k, Register tmp1, Register tmp2, Register tmp3, 7772 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7773 7774 Label L_carry, L_last_in, L_done; 7775 7776 // carry = 0; 7777 // for (int j=len-1; j >= 0; j--) { 7778 // long product = (in[j] & LONG_MASK) * kLong + 7779 // (out[offs] & LONG_MASK) + carry; 7780 // out[offs--] = (int)product; 7781 // carry = product >>> 32; 7782 // } 7783 // 7784 push(tmp1); 7785 push(tmp2); 7786 push(tmp3); 7787 push(tmp4); 7788 push(tmp5); 7789 7790 Register op2 = tmp2; 7791 const Register sum = tmp3; 7792 const Register op1 = tmp4; 7793 const Register carry = tmp5; 7794 7795 if (UseBMI2Instructions) { 7796 op2 = rdxReg; 7797 movl(op2, k); 7798 } 7799 else { 7800 movl(op2, k); 7801 } 7802 7803 xorq(carry, carry); 7804 7805 //First loop 7806 7807 //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply 7808 //The carry is in tmp5 7809 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg); 7810 7811 //Multiply the trailing in[] entry using 64 bit by 32 bit, if any 7812 decrementl(len); 7813 jccb(Assembler::negative, L_carry); 7814 decrementl(len); 7815 jccb(Assembler::negative, L_last_in); 7816 7817 movq(op1, Address(in, len, Address::times_4, 0)); 7818 rorq(op1, 32); 7819 7820 subl(offs, 2); 7821 movq(sum, Address(out, offs, Address::times_4, 0)); 7822 rorq(sum, 32); 7823 7824 if (UseBMI2Instructions) { 7825 
multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7826 } 7827 else { 7828 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7829 } 7830 7831 // Store back in big endian from little endian 7832 rorq(sum, 0x20); 7833 movq(Address(out, offs, Address::times_4, 0), sum); 7834 7835 testl(len, len); 7836 jccb(Assembler::zero, L_carry); 7837 7838 //Multiply the last in[] entry, if any 7839 bind(L_last_in); 7840 movl(op1, Address(in, 0)); 7841 movl(sum, Address(out, offs, Address::times_4, -4)); 7842 7843 movl(raxReg, k); 7844 mull(op1); //tmp4 * eax -> edx:eax 7845 addl(sum, carry); 7846 adcl(rdxReg, 0); 7847 addl(sum, raxReg); 7848 adcl(rdxReg, 0); 7849 movl(carry, rdxReg); 7850 7851 movl(Address(out, offs, Address::times_4, -4), sum); 7852 7853 bind(L_carry); 7854 //return tmp5/carry as carry in rax 7855 movl(rax, carry); 7856 7857 bind(L_done); 7858 pop(tmp5); 7859 pop(tmp4); 7860 pop(tmp3); 7861 pop(tmp2); 7862 pop(tmp1); 7863 } 7864 #endif 7865 7866 /** 7867 * Emits code to update CRC-32 with a byte value according to constants in table 7868 * 7869 * @param [in,out]crc Register containing the crc. 7870 * @param [in]val Register containing the byte to fold into the CRC. 7871 * @param [in]table Register containing the table of crc constants. 7872 * 7873 * uint32_t crc; 7874 * val = crc_table[(val ^ crc) & 0xFF]; 7875 * crc = val ^ (crc >> 8); 7876 * 7877 */ 7878 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 7879 xorl(val, crc); 7880 andl(val, 0xFF); 7881 shrl(crc, 8); // unsigned shift 7882 xorl(crc, Address(table, val, Address::times_4, 0)); 7883 } 7884 7885 /** 7886 * Fold 128-bit data chunk 7887 */ 7888 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { 7889 if (UseAVX > 0) { 7890 vpclmulhdq(xtmp, xK, xcrc); // [123:64] 7891 vpclmulldq(xcrc, xK, xcrc); // [63:0] 7892 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */); 7893 pxor(xcrc, xtmp); 7894 } else { 7895 movdqa(xtmp, xcrc); 7896 pclmulhdq(xtmp, xK); // [123:64] 7897 pclmulldq(xcrc, xK); // [63:0] 7898 pxor(xcrc, xtmp); 7899 movdqu(xtmp, Address(buf, offset)); 7900 pxor(xcrc, xtmp); 7901 } 7902 } 7903 7904 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) { 7905 if (UseAVX > 0) { 7906 vpclmulhdq(xtmp, xK, xcrc); 7907 vpclmulldq(xcrc, xK, xcrc); 7908 pxor(xcrc, xbuf); 7909 pxor(xcrc, xtmp); 7910 } else { 7911 movdqa(xtmp, xcrc); 7912 pclmulhdq(xtmp, xK); 7913 pclmulldq(xcrc, xK); 7914 pxor(xcrc, xbuf); 7915 pxor(xcrc, xtmp); 7916 } 7917 } 7918 7919 /** 7920 * 8-bit folds to compute 32-bit CRC 7921 * 7922 * uint64_t xcrc; 7923 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8); 7924 */ 7925 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) { 7926 movdl(tmp, xcrc); 7927 andl(tmp, 0xFF); 7928 movdl(xtmp, Address(table, tmp, Address::times_4, 0)); 7929 psrldq(xcrc, 1); // unsigned shift one byte 7930 pxor(xcrc, xtmp); 7931 } 7932 7933 /** 7934 * uint32_t crc; 7935 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 7936 */ 7937 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 7938 movl(tmp, crc); 7939 andl(tmp, 0xFF); 7940 shrl(crc, 8); 7941 xorl(crc, Address(table, tmp, Address::times_4, 0)); 7942 } 7943 7944 /** 7945 * @param crc register containing existing CRC (32-bit) 7946 * @param buf register pointing to input byte buffer (byte*) 7947 * @param len register containing number of bytes 7948 * 
@param table register that will contain address of CRC table 7949 * @param tmp scratch register 7950 */ 7951 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) { 7952 assert_different_registers(crc, buf, len, table, tmp, rax); 7953 7954 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 7955 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 7956 7957 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 7958 // context for the registers used, where all instructions below are using 128-bit mode 7959 // On EVEX without VL and BW, these instructions will all be AVX. 7960 lea(table, ExternalAddress(StubRoutines::crc_table_addr())); 7961 notl(crc); // ~crc 7962 cmpl(len, 16); 7963 jcc(Assembler::less, L_tail); 7964 7965 // Align buffer to 16 bytes 7966 movl(tmp, buf); 7967 andl(tmp, 0xF); 7968 jccb(Assembler::zero, L_aligned); 7969 subl(tmp, 16); 7970 addl(len, tmp); 7971 7972 align(4); 7973 BIND(L_align_loop); 7974 movsbl(rax, Address(buf, 0)); // load byte with sign extension 7975 update_byte_crc32(crc, rax, table); 7976 increment(buf); 7977 incrementl(tmp); 7978 jccb(Assembler::less, L_align_loop); 7979 7980 BIND(L_aligned); 7981 movl(tmp, len); // save 7982 shrl(len, 4); 7983 jcc(Assembler::zero, L_tail_restore); 7984 7985 // Fold crc into first bytes of vector 7986 movdqa(xmm1, Address(buf, 0)); 7987 movdl(rax, xmm1); 7988 xorl(crc, rax); 7989 if (VM_Version::supports_sse4_1()) { 7990 pinsrd(xmm1, crc, 0); 7991 } else { 7992 pinsrw(xmm1, crc, 0); 7993 shrl(crc, 16); 7994 pinsrw(xmm1, crc, 1); 7995 } 7996 addptr(buf, 16); 7997 subl(len, 4); // len > 0 7998 jcc(Assembler::less, L_fold_tail); 7999 8000 movdqa(xmm2, Address(buf, 0)); 8001 movdqa(xmm3, Address(buf, 16)); 8002 movdqa(xmm4, Address(buf, 32)); 8003 addptr(buf, 48); 8004 subl(len, 3); 8005 jcc(Assembler::lessEqual, L_fold_512b); 8006 8007 // Fold total 512 bits of polynomial on each iteration, 8008 // 128 bits per each of 4 parallel streams. 8009 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1); 8010 8011 align32(); 8012 BIND(L_fold_512b_loop); 8013 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 8014 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16); 8015 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32); 8016 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48); 8017 addptr(buf, 64); 8018 subl(len, 4); 8019 jcc(Assembler::greater, L_fold_512b_loop); 8020 8021 // Fold 512 bits to 128 bits. 8022 BIND(L_fold_512b); 8023 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 8024 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2); 8025 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3); 8026 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4); 8027 8028 // Fold the rest of 128 bits data chunks 8029 BIND(L_fold_tail); 8030 addl(len, 3); 8031 jccb(Assembler::lessEqual, L_fold_128b); 8032 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 8033 8034 BIND(L_fold_tail_loop); 8035 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 8036 addptr(buf, 16); 8037 decrementl(len); 8038 jccb(Assembler::greater, L_fold_tail_loop); 8039 8040 // Fold 128 bits in xmm1 down into 32 bits in crc register. 
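// (Editorial sketch, not compiled) Each of the eight 8-bit folds below is the
// classic table step applied to the 64 leftover bits, in scalar C:
//   crc = timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
// four folds operate on the low qword still in xmm0, then four more once the
// value has been moved into the general-purpose crc register.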
8041 BIND(L_fold_128b);
8042 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1);
8043 if (UseAVX > 0) {
8044 vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
8045 vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
8046 vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
8047 } else {
8048 movdqa(xmm2, xmm0);
8049 pclmulqdq(xmm2, xmm1, 0x1);
8050 movdqa(xmm3, xmm0);
8051 pand(xmm3, xmm2);
8052 pclmulqdq(xmm0, xmm3, 0x1);
8053 }
8054 psrldq(xmm1, 8);
8055 psrldq(xmm2, 4);
8056 pxor(xmm0, xmm1);
8057 pxor(xmm0, xmm2);
8058
8059 // 8 8-bit folds to compute 32-bit CRC.
8060 for (int j = 0; j < 4; j++) {
8061 fold_8bit_crc32(xmm0, table, xmm1, rax);
8062 }
8063 movdl(crc, xmm0); // mov 32 bits to general register
8064 for (int j = 0; j < 4; j++) {
8065 fold_8bit_crc32(crc, table, rax);
8066 }
8067
8068 BIND(L_tail_restore);
8069 movl(len, tmp); // restore
8070 BIND(L_tail);
8071 andl(len, 0xf);
8072 jccb(Assembler::zero, L_exit);
8073
8074 // Fold the rest of bytes
8075 align(4);
8076 BIND(L_tail_loop);
8077 movsbl(rax, Address(buf, 0)); // load byte with sign extension
8078 update_byte_crc32(crc, rax, table);
8079 increment(buf);
8080 decrementl(len);
8081 jccb(Assembler::greater, L_tail_loop);
8082
8083 BIND(L_exit);
8084 notl(crc); // ~crc
8085 }
8086
8087 #ifdef _LP64
8088 // Helper function for AVX 512 CRC32
8089 // Fold 512-bit data chunks
8090 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf,
8091 Register pos, int offset) {
8092 evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit);
8093 evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64]
8094 evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0]
8095 evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */);
8096 evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */);
8097 }
8098
8099 // Helper function for AVX 512 CRC32
8100 // Compute CRC32 for < 256B buffers
8101 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos,
8102 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
8103 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) {
8104
8105 Label L_less_than_32, L_exact_16_left, L_less_than_16_left;
8106 Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left;
8107 Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2;
8108
8109 // check if there is enough buffer to be able to fold 16B at a time
8110 cmpl(len, 32);
8111 jcc(Assembler::less, L_less_than_32);
8112
8113 // if there is, load the constants
8114 movdqu(xmm10, Address(table, 1 * 16)); //rk1 and rk2 in xmm10
8115 movdl(xmm0, crc); // get the initial crc value
8116 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
8117 pxor(xmm7, xmm0);
8118
8119 // update the buffer pointer
8120 addl(pos, 16);
8121 // update the counter; subtract 32 instead of 16 to save one instruction in the loop
8122 subl(len, 32);
8123 jmp(L_16B_reduction_loop);
8124
8125 bind(L_less_than_32);
8126 // mov initial crc to the return value; this is necessary for zero-length buffers.
movl(rax, crc);
8128 testl(len, len);
8129 jcc(Assembler::equal, L_cleanup);
8130
8131 movdl(xmm0, crc); //get the initial crc value
8132
8133 cmpl(len, 16);
8134 jcc(Assembler::equal, L_exact_16_left);
8135 jcc(Assembler::less, L_less_than_16_left);
8136
8137 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
8138 pxor(xmm7, xmm0); //xor the initial crc value
8139 addl(pos, 16);
8140 subl(len, 16);
8141 movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10
8142 jmp(L_get_last_two_xmms);
8143
8144 bind(L_less_than_16_left);
8145 // use stack space to load data less than 16 bytes; zero out the 16B in memory first.
8146 pxor(xmm1, xmm1);
8147 movptr(tmp1, rsp);
8148 movdqu(Address(tmp1, 0 * 16), xmm1);
8149
8150 cmpl(len, 4);
8151 jcc(Assembler::less, L_only_less_than_4);
8152
8153 //backup the counter value
8154 movl(tmp2, len);
8155 cmpl(len, 8);
8156 jcc(Assembler::less, L_less_than_8_left);
8157
8158 //load 8 Bytes
8159 movq(rax, Address(buf, pos, Address::times_1, 0 * 16));
8160 movq(Address(tmp1, 0 * 16), rax);
8161 addptr(tmp1, 8);
8162 subl(len, 8);
8163 addl(pos, 8);
8164
8165 bind(L_less_than_8_left);
8166 cmpl(len, 4);
8167 jcc(Assembler::less, L_less_than_4_left);
8168
8169 //load 4 Bytes
8170 movl(rax, Address(buf, pos, Address::times_1, 0));
8171 movl(Address(tmp1, 0 * 16), rax);
8172 addptr(tmp1, 4);
8173 subl(len, 4);
8174 addl(pos, 4);
8175
8176 bind(L_less_than_4_left);
8177 cmpl(len, 2);
8178 jcc(Assembler::less, L_less_than_2_left);
8179
8180 // load 2 Bytes
8181 movw(rax, Address(buf, pos, Address::times_1, 0));
8182 movl(Address(tmp1, 0 * 16), rax);
8183 addptr(tmp1, 2);
8184 subl(len, 2);
8185 addl(pos, 2);
8186
8187 bind(L_less_than_2_left);
8188 cmpl(len, 1);
8189 jcc(Assembler::less, L_zero_left);
8190
8191 // load 1 Byte
8192 movb(rax, Address(buf, pos, Address::times_1, 0));
8193 movb(Address(tmp1, 0 * 16), rax);
8194
8195 bind(L_zero_left);
8196 movdqu(xmm7, Address(rsp, 0));
8197 pxor(xmm7, xmm0); //xor the initial crc value
8198
8199 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
8200 movdqu(xmm0, Address(rax, tmp2));
8201 pshufb(xmm7, xmm0);
8202 jmp(L_128_done);
8203
8204 bind(L_exact_16_left);
8205 movdqu(xmm7, Address(buf, pos, Address::times_1, 0));
8206 pxor(xmm7, xmm0); //xor the initial crc value
8207 jmp(L_128_done);
8208
8209 bind(L_only_less_than_4);
8210 cmpl(len, 3);
8211 jcc(Assembler::less, L_only_less_than_3);
8212
8213 // load 3 Bytes
8214 movb(rax, Address(buf, pos, Address::times_1, 0));
8215 movb(Address(tmp1, 0), rax);
8216
8217 movb(rax, Address(buf, pos, Address::times_1, 1));
8218 movb(Address(tmp1, 1), rax);
8219
8220 movb(rax, Address(buf, pos, Address::times_1, 2));
8221 movb(Address(tmp1, 2), rax);
8222
8223 movdqu(xmm7, Address(rsp, 0));
8224 pxor(xmm7, xmm0); //xor the initial crc value
8225
8226 pslldq(xmm7, 0x5);
8227 jmp(L_barrett);
8228 bind(L_only_less_than_3);
8229 cmpl(len, 2);
8230 jcc(Assembler::less, L_only_less_than_2);
8231
8232 // load 2 Bytes
8233 movb(rax, Address(buf, pos, Address::times_1, 0));
8234 movb(Address(tmp1, 0), rax);
8235
8236 movb(rax, Address(buf, pos, Address::times_1, 1));
8237 movb(Address(tmp1, 1), rax);
8238
8239 movdqu(xmm7, Address(rsp, 0));
8240 pxor(xmm7, xmm0); //xor the initial crc value
8241
8242 pslldq(xmm7, 0x6);
8243 jmp(L_barrett);
8244
8245 bind(L_only_less_than_2);
8246 //load 1 Byte
8247 movb(rax, Address(buf, pos, Address::times_1, 0));
8248 movb(Address(tmp1, 0), rax);
8249
8250 movdqu(xmm7, Address(rsp,
0)); 8251 pxor(xmm7, xmm0); //xor the initial crc value 8252 8253 pslldq(xmm7, 0x7); 8254 } 8255 8256 /** 8257 * Compute CRC32 using AVX512 instructions 8258 * param crc register containing existing CRC (32-bit) 8259 * param buf register pointing to input byte buffer (byte*) 8260 * param len register containing number of bytes 8261 * param table address of crc or crc32c table 8262 * param tmp1 scratch register 8263 * param tmp2 scratch register 8264 * return rax result register 8265 * 8266 * This routine is identical for crc32c with the exception of the precomputed constant 8267 * table which will be passed as the table argument. The calculation steps are 8268 * the same for both variants. 8269 */ 8270 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) { 8271 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12); 8272 8273 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 8274 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 8275 Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop; 8276 Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop; 8277 Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup; 8278 8279 const Register pos = r12; 8280 push(r12); 8281 subptr(rsp, 16 * 2 + 8); 8282 8283 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 8284 // context for the registers used, where all instructions below are using 128-bit mode 8285 // On EVEX without VL and BW, these instructions will all be AVX. 8286 movl(pos, 0); 8287 8288 // check if smaller than 256B 8289 cmpl(len, 256); 8290 jcc(Assembler::less, L_less_than_256); 8291 8292 // load the initial crc value 8293 movdl(xmm10, crc); 8294 8295 // receive the initial 64B data, xor the initial crc value 8296 evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit); 8297 evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit); 8298 evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit); 8299 evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4 8300 8301 subl(len, 256); 8302 cmpl(len, 256); 8303 jcc(Assembler::less, L_fold_128_B_loop); 8304 8305 evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit); 8306 evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit); 8307 evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2 8308 subl(len, 256); 8309 8310 bind(L_fold_256_B_loop); 8311 addl(pos, 256); 8312 fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64); 8313 fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64); 8314 fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64); 8315 fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64); 8316 8317 subl(len, 256); 8318 jcc(Assembler::greaterEqual, L_fold_256_B_loop); 8319 8320 // Fold 256 into 128 8321 addl(pos, 256); 8322 evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit); 8323 evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit); 8324 vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC 8325 8326 evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit); 8327 evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit); 8328 vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC 8329 8330 evmovdquq(xmm0, xmm7, 
Assembler::AVX_512bit);
8331 evmovdquq(xmm4, xmm8, Assembler::AVX_512bit);
8332
8333 addl(len, 128);
8334 jmp(L_fold_128_B_register);
8335
8336 // at this point in the code, there are 128 * x + y (0 <= y < 128) bytes of buffer. The fold_128_B_loop
8337 // below will fold 128B at a time until we have 128 + y bytes of buffer
8338
8339 // fold 128B at a time. This section of the code folds two 512-bit registers in parallel
8340 bind(L_fold_128_B_loop);
8341 addl(pos, 128);
8342 fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64);
8343 fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64);
8344
8345 subl(len, 128);
8346 jcc(Assembler::greaterEqual, L_fold_128_B_loop);
8347
8348 addl(pos, 128);
8349
8350 // at this point, the buffer pointer is pointing at the last y bytes of the buffer, where 0 <= y < 128
8351 // the 128B of folded data is held in two 512-bit registers: zmm0 and zmm4 (written xmm0 and xmm4 above)
8352 bind(L_fold_128_B_register);
8353 evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16
8354 evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0
8355 evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit);
8356 evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit);
8357 // save last that has no multiplicand
8358 vextracti64x2(xmm7, xmm4, 3);
8359
8360 evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit);
8361 evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit);
8362 // Needed later in reduction loop
8363 movdqu(xmm10, Address(table, 1 * 16));
8364 vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC
8365 vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC
8366
8367 // Swap 1,0,3,2 - 01 00 11 10
8368 evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit);
8369 evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit);
8370 vextracti128(xmm5, xmm8, 1);
8371 evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit);
8372
8373 // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop
8374 // instead of a cmp instruction, we use the negative flag with the jl instruction
8375 addl(len, 128 - 16);
8376 jcc(Assembler::less, L_final_reduction_for_128);
8377
8378 bind(L_16B_reduction_loop);
8379 vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
8380 vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
8381 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
8382 movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16));
8383 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8384 addl(pos, 16);
8385 subl(len, 16);
8386 jcc(Assembler::greaterEqual, L_16B_reduction_loop);
8387
8388 bind(L_final_reduction_for_128);
8389 addl(len, 16);
8390 jcc(Assembler::equal, L_128_done);
8391
8392 bind(L_get_last_two_xmms);
8393 movdqu(xmm2, xmm7);
8394 addl(pos, len);
8395 movdqu(xmm1, Address(buf, pos, Address::times_1, -16));
8396 subl(pos, len);
8397
8398 // get rid of the extra data that was loaded before
8399 // load the shift constant
8400 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
8401 movdqu(xmm0, Address(rax, len));
8402 addl(rax, len);
8403
8404 vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
8405 //Change mask to 512
8406 vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2);
8407 vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit);
8408
8409 blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit);
8410 vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
8411 vpclmulqdq(xmm7,
xmm7, xmm10, 0x10); 8412 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 8413 vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit); 8414 8415 bind(L_128_done); 8416 // compute crc of a 128-bit value 8417 movdqu(xmm10, Address(table, 3 * 16)); 8418 movdqu(xmm0, xmm7); 8419 8420 // 64b fold 8421 vpclmulqdq(xmm7, xmm7, xmm10, 0x0); 8422 vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit); 8423 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8424 8425 // 32b fold 8426 movdqu(xmm0, xmm7); 8427 vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit); 8428 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 8429 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8430 jmp(L_barrett); 8431 8432 bind(L_less_than_256); 8433 kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup); 8434 8435 //barrett reduction 8436 bind(L_barrett); 8437 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2); 8438 movdqu(xmm1, xmm7); 8439 movdqu(xmm2, xmm7); 8440 movdqu(xmm10, Address(table, 4 * 16)); 8441 8442 pclmulqdq(xmm7, xmm10, 0x0); 8443 pxor(xmm7, xmm2); 8444 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2); 8445 movdqu(xmm2, xmm7); 8446 pclmulqdq(xmm7, xmm10, 0x10); 8447 pxor(xmm7, xmm2); 8448 pxor(xmm7, xmm1); 8449 pextrd(crc, xmm7, 2); 8450 8451 bind(L_cleanup); 8452 addptr(rsp, 16 * 2 + 8); 8453 pop(r12); 8454 } 8455 8456 // S. Gueron / Information Processing Letters 112 (2012) 184 8457 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table. 8458 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0]. 8459 // Output: the 64-bit carry-less product of B * CONST 8460 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n, 8461 Register tmp1, Register tmp2, Register tmp3) { 8462 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 8463 if (n > 0) { 8464 addq(tmp3, n * 256 * 8); 8465 } 8466 // Q1 = TABLEExt[n][B & 0xFF]; 8467 movl(tmp1, in); 8468 andl(tmp1, 0x000000FF); 8469 shll(tmp1, 3); 8470 addq(tmp1, tmp3); 8471 movq(tmp1, Address(tmp1, 0)); 8472 8473 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 8474 movl(tmp2, in); 8475 shrl(tmp2, 8); 8476 andl(tmp2, 0x000000FF); 8477 shll(tmp2, 3); 8478 addq(tmp2, tmp3); 8479 movq(tmp2, Address(tmp2, 0)); 8480 8481 shlq(tmp2, 8); 8482 xorq(tmp1, tmp2); 8483 8484 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 8485 movl(tmp2, in); 8486 shrl(tmp2, 16); 8487 andl(tmp2, 0x000000FF); 8488 shll(tmp2, 3); 8489 addq(tmp2, tmp3); 8490 movq(tmp2, Address(tmp2, 0)); 8491 8492 shlq(tmp2, 16); 8493 xorq(tmp1, tmp2); 8494 8495 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 8496 shrl(in, 24); 8497 andl(in, 0x000000FF); 8498 shll(in, 3); 8499 addq(in, tmp3); 8500 movq(in, Address(in, 0)); 8501 8502 shlq(in, 24); 8503 xorq(in, tmp1); 8504 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 8505 } 8506 8507 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 8508 Register in_out, 8509 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 8510 XMMRegister w_xtmp2, 8511 Register tmp1, 8512 Register n_tmp2, Register n_tmp3) { 8513 if (is_pclmulqdq_supported) { 8514 movdl(w_xtmp1, in_out); // modified blindly 8515 8516 movl(tmp1, const_or_pre_comp_const_index); 8517 movdl(w_xtmp2, tmp1); 8518 pclmulqdq(w_xtmp1, w_xtmp2, 0); 8519 8520 movdq(in_out, w_xtmp1); 8521 } else { 8522 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3); 8523 } 8524 } 8525 8526 // 
Recombination Alternative 2: No bit-reflections 8527 // T1 = (CRC_A * U1) << 1 8528 // T2 = (CRC_B * U2) << 1 8529 // C1 = T1 >> 32 8530 // C2 = T2 >> 32 8531 // T1 = T1 & 0xFFFFFFFF 8532 // T2 = T2 & 0xFFFFFFFF 8533 // T1 = CRC32(0, T1) 8534 // T2 = CRC32(0, T2) 8535 // C1 = C1 ^ T1 8536 // C2 = C2 ^ T2 8537 // CRC = C1 ^ C2 ^ CRC_C 8538 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 8539 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8540 Register tmp1, Register tmp2, 8541 Register n_tmp3) { 8542 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8543 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8544 shlq(in_out, 1); 8545 movl(tmp1, in_out); 8546 shrq(in_out, 32); 8547 xorl(tmp2, tmp2); 8548 crc32(tmp2, tmp1, 4); 8549 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here 8550 shlq(in1, 1); 8551 movl(tmp1, in1); 8552 shrq(in1, 32); 8553 xorl(tmp2, tmp2); 8554 crc32(tmp2, tmp1, 4); 8555 xorl(in1, tmp2); 8556 xorl(in_out, in1); 8557 xorl(in_out, in2); 8558 } 8559 8560 // Set N to a predefined value 8561 // Subtract it from the length of the buffer 8562 // Execute in a loop: 8563 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0 8564 // for i = 1 to N do 8565 // CRC_A = CRC32(CRC_A, A[i]) 8566 // CRC_B = CRC32(CRC_B, B[i]) 8567 // CRC_C = CRC32(CRC_C, C[i]) 8568 // end for 8569 // Recombine 8570 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 8571 Register in_out1, Register in_out2, Register in_out3, 8572 Register tmp1, Register tmp2, Register tmp3, 8573 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8574 Register tmp4, Register tmp5, 8575 Register n_tmp6) { 8576 Label L_processPartitions; 8577 Label L_processPartition; 8578 Label L_exit; 8579 8580 bind(L_processPartitions); 8581 cmpl(in_out1, 3 * size); 8582 jcc(Assembler::less, L_exit); 8583 xorl(tmp1, tmp1); 8584 xorl(tmp2, tmp2); 8585 movq(tmp3, in_out2); 8586 addq(tmp3, size); 8587 8588 bind(L_processPartition); 8589 crc32(in_out3, Address(in_out2, 0), 8); 8590 crc32(tmp1, Address(in_out2, size), 8); 8591 crc32(tmp2, Address(in_out2, size * 2), 8); 8592 addq(in_out2, 8); 8593 cmpq(in_out2, tmp3); 8594 jcc(Assembler::less, L_processPartition); 8595 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 8596 w_xtmp1, w_xtmp2, w_xtmp3, 8597 tmp4, tmp5, 8598 n_tmp6); 8599 addq(in_out2, 2 * size); 8600 subl(in_out1, 3 * size); 8601 jmp(L_processPartitions); 8602 8603 bind(L_exit); 8604 } 8605 #else 8606 void MacroAssembler::crc32c_ipl_alg4(Register in_out, uint32_t n, 8607 Register tmp1, Register tmp2, Register tmp3, 8608 XMMRegister xtmp1, XMMRegister xtmp2) { 8609 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 8610 if (n > 0) { 8611 addl(tmp3, n * 256 * 8); 8612 } 8613 // Q1 = TABLEExt[n][B & 0xFF]; 8614 movl(tmp1, in_out); 8615 andl(tmp1, 0x000000FF); 8616 shll(tmp1, 3); 8617 addl(tmp1, tmp3); 8618 movq(xtmp1, Address(tmp1, 0)); 8619 8620 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 8621 movl(tmp2, in_out); 8622 shrl(tmp2, 8); 8623 andl(tmp2, 0x000000FF); 8624 shll(tmp2, 3); 8625 addl(tmp2, tmp3); 8626 movq(xtmp2,
Address(tmp2, 0)); 8627 8628 psllq(xtmp2, 8); 8629 pxor(xtmp1, xtmp2); 8630 8631 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 8632 movl(tmp2, in_out); 8633 shrl(tmp2, 16); 8634 andl(tmp2, 0x000000FF); 8635 shll(tmp2, 3); 8636 addl(tmp2, tmp3); 8637 movq(xtmp2, Address(tmp2, 0)); 8638 8639 psllq(xtmp2, 16); 8640 pxor(xtmp1, xtmp2); 8641 8642 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 8643 shrl(in_out, 24); 8644 andl(in_out, 0x000000FF); 8645 shll(in_out, 3); 8646 addl(in_out, tmp3); 8647 movq(xtmp2, Address(in_out, 0)); 8648 8649 psllq(xtmp2, 24); 8650 pxor(xtmp1, xtmp2); // Result in CXMM 8651 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 8652 } 8653 8654 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 8655 Register in_out, 8656 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 8657 XMMRegister w_xtmp2, 8658 Register tmp1, 8659 Register n_tmp2, Register n_tmp3) { 8660 if (is_pclmulqdq_supported) { 8661 movdl(w_xtmp1, in_out); 8662 8663 movl(tmp1, const_or_pre_comp_const_index); 8664 movdl(w_xtmp2, tmp1); 8665 pclmulqdq(w_xtmp1, w_xtmp2, 0); 8666 // Keep result in XMM since GPR is 32 bit in length 8667 } else { 8668 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3, w_xtmp1, w_xtmp2); 8669 } 8670 } 8671 8672 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 8673 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8674 Register tmp1, Register tmp2, 8675 Register n_tmp3) { 8676 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8677 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8678 8679 psllq(w_xtmp1, 1); 8680 movdl(tmp1, w_xtmp1); 8681 psrlq(w_xtmp1, 32); 8682 movdl(in_out, w_xtmp1); 8683 8684 xorl(tmp2, tmp2); 8685 crc32(tmp2, tmp1, 4); 8686 xorl(in_out, tmp2); 8687 8688 psllq(w_xtmp2, 1); 8689 movdl(tmp1, w_xtmp2); 8690 psrlq(w_xtmp2, 32); 8691 movdl(in1, w_xtmp2); 8692 8693 xorl(tmp2, tmp2); 8694 crc32(tmp2, tmp1, 4); 8695 xorl(in1, tmp2); 8696 xorl(in_out, in1); 8697 xorl(in_out, in2); 8698 } 8699 8700 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 8701 Register in_out1, Register in_out2, Register in_out3, 8702 Register tmp1, Register tmp2, Register tmp3, 8703 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8704 Register tmp4, Register tmp5, 8705 Register n_tmp6) { 8706 Label L_processPartitions; 8707 Label L_processPartition; 8708 Label L_exit; 8709 8710 bind(L_processPartitions); 8711 cmpl(in_out1, 3 * size); 8712 jcc(Assembler::less, L_exit); 8713 xorl(tmp1, tmp1); 8714 xorl(tmp2, tmp2); 8715 movl(tmp3, in_out2); 8716 addl(tmp3, size); 8717 8718 bind(L_processPartition); 8719 crc32(in_out3, Address(in_out2, 0), 4); 8720 crc32(tmp1, Address(in_out2, size), 4); 8721 crc32(tmp2, Address(in_out2, size*2), 4); 8722 crc32(in_out3, Address(in_out2, 0+4), 4); 8723 crc32(tmp1, Address(in_out2, size+4), 4); 8724 crc32(tmp2, Address(in_out2, size*2+4), 4); 8725 addl(in_out2, 8); 8726 cmpl(in_out2, tmp3); 8727 jcc(Assembler::less, L_processPartition); 8728 8729 push(tmp3); 8730 push(in_out1); 8731 push(in_out2); 8732 tmp4 = tmp3; 8733 tmp5 = in_out1; 8734 n_tmp6 = in_out2; 8735 8736 
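// x86_32 has run out of registers at this point: the recombination
// temporaries are aliased onto the three registers just saved above,
// and the pops below restore the callers' values.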
crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 8737 w_xtmp1, w_xtmp2, w_xtmp3, 8738 tmp4, tmp5, 8739 n_tmp6); 8740 8741 pop(in_out2); 8742 pop(in_out1); 8743 pop(tmp3); 8744 8745 addl(in_out2, 2 * size); 8746 subl(in_out1, 3 * size); 8747 jmp(L_processPartitions); 8748 8749 bind(L_exit); 8750 } 8751 #endif //LP64 8752 8753 #ifdef _LP64 8754 // Algorithm 2: Pipelined usage of the CRC32 instruction. 8755 // Input: A buffer I of L bytes. 8756 // Output: the CRC32C value of the buffer. 8757 // Notations: 8758 // Write L = 24N + r, with N = floor(L/24). 8759 // r = L mod 24 (0 <= r < 24). 8760 // Consider I as the concatenation of A|B|C|R, where A, B, C each consist 8761 // of N quadwords, and R consists of r bytes. 8762 // A[j] = I [8j+7:8j], j = 0, 1, ..., N-1 8763 // B[j] = I [8N + 8j+7:8N + 8j], j = 0, 1, ..., N-1 8764 // C[j] = I [16N + 8j+7:16N + 8j], j = 0, 1, ..., N-1 8765 // if r > 0 R[j] = I [24N + j], j = 0, 1, ..., r-1 8766 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 8767 Register tmp1, Register tmp2, Register tmp3, 8768 Register tmp4, Register tmp5, Register tmp6, 8769 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8770 bool is_pclmulqdq_supported) { 8771 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 8772 Label L_wordByWord; 8773 Label L_byteByByteProlog; 8774 Label L_byteByByte; 8775 Label L_exit; 8776 8777 if (is_pclmulqdq_supported) { 8778 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 8779 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr+1); 8780 8781 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 8782 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 8783 8784 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 8785 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 8786 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\""); 8787 } else { 8788 const_or_pre_comp_const_index[0] = 1; 8789 const_or_pre_comp_const_index[1] = 0; 8790 8791 const_or_pre_comp_const_index[2] = 3; 8792 const_or_pre_comp_const_index[3] = 2; 8793 8794 const_or_pre_comp_const_index[4] = 5; 8795 const_or_pre_comp_const_index[5] = 4; 8796 } 8797 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 8798 in2, in1, in_out, 8799 tmp1, tmp2, tmp3, 8800 w_xtmp1, w_xtmp2, w_xtmp3, 8801 tmp4, tmp5, 8802 tmp6); 8803 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 8804 in2, in1, in_out, 8805 tmp1, tmp2, tmp3, 8806 w_xtmp1, w_xtmp2, w_xtmp3, 8807 tmp4, tmp5, 8808 tmp6); 8809 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 8810 in2, in1, in_out, 8811 tmp1, tmp2, tmp3, 8812 w_xtmp1, w_xtmp2, w_xtmp3, 8813 tmp4, tmp5, 8814 tmp6); 8815 movl(tmp1, in2); 8816 andl(tmp1, 0x00000007); 8817 negl(tmp1); 8818 addl(tmp1, in2); 8819 addq(tmp1, in1); 8820 8821 cmpq(in1, tmp1); 8822 jccb(Assembler::greaterEqual, L_byteByByteProlog); 8823 align(16); 8824 BIND(L_wordByWord); 8825 crc32(in_out, Address(in1, 0), 8); 8826 addq(in1, 8); 8827 cmpq(in1, tmp1); 8828
jcc(Assembler::less, L_wordByWord); 8829 8830 BIND(L_byteByByteProlog); 8831 andl(in2, 0x00000007); 8832 movl(tmp2, 1); 8833 8834 cmpl(tmp2, in2); 8835 jccb(Assembler::greater, L_exit); 8836 BIND(L_byteByByte); 8837 crc32(in_out, Address(in1, 0), 1); 8838 incq(in1); 8839 incl(tmp2); 8840 cmpl(tmp2, in2); 8841 jcc(Assembler::lessEqual, L_byteByByte); 8842 8843 BIND(L_exit); 8844 } 8845 #else 8846 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 8847 Register tmp1, Register tmp2, Register tmp3, 8848 Register tmp4, Register tmp5, Register tmp6, 8849 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8850 bool is_pclmulqdq_supported) { 8851 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 8852 Label L_wordByWord; 8853 Label L_byteByByteProlog; 8854 Label L_byteByByte; 8855 Label L_exit; 8856 8857 if (is_pclmulqdq_supported) { 8858 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 8859 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1); 8860 8861 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 8862 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 8863 8864 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 8865 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 8866 } else { 8867 const_or_pre_comp_const_index[0] = 1; 8868 const_or_pre_comp_const_index[1] = 0; 8869 8870 const_or_pre_comp_const_index[2] = 3; 8871 const_or_pre_comp_const_index[3] = 2; 8872 8873 const_or_pre_comp_const_index[4] = 5; 8874 const_or_pre_comp_const_index[5] = 4; 8875 } 8876 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 8877 in2, in1, in_out, 8878 tmp1, tmp2, tmp3, 8879 w_xtmp1, w_xtmp2, w_xtmp3, 8880 tmp4, tmp5, 8881 tmp6); 8882 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 8883 in2, in1, in_out, 8884 tmp1, tmp2, tmp3, 8885 w_xtmp1, w_xtmp2, w_xtmp3, 8886 tmp4, tmp5, 8887 tmp6); 8888 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 8889 in2, in1, in_out, 8890 tmp1, tmp2, tmp3, 8891 w_xtmp1, w_xtmp2, w_xtmp3, 8892 tmp4, tmp5, 8893 tmp6); 8894 movl(tmp1, in2); 8895 andl(tmp1, 0x00000007); 8896 negl(tmp1); 8897 addl(tmp1, in2); 8898 addl(tmp1, in1); 8899 8900 BIND(L_wordByWord); 8901 cmpl(in1, tmp1); 8902 jcc(Assembler::greaterEqual, L_byteByByteProlog); 8903 crc32(in_out, Address(in1,0), 4); 8904 addl(in1, 4); 8905 jmp(L_wordByWord); 8906 8907 BIND(L_byteByByteProlog); 8908 andl(in2, 0x00000007); 8909 movl(tmp2, 1); 8910 8911 BIND(L_byteByByte); 8912 cmpl(tmp2, in2); 8913 jccb(Assembler::greater, L_exit); 8914 movb(tmp1, Address(in1, 0)); 8915 crc32(in_out, tmp1, 1); 8916 incl(in1); 8917 incl(tmp2); 8918 jmp(L_byteByByte); 8919 8920 BIND(L_exit); 8921 } 8922 #endif // LP64 8923 #undef BIND 8924 #undef BLOCK_COMMENT 8925 8926 // Compress char[] array to byte[]. 8927 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 8928 // Return the array length if every element in array can be encoded, 8929 // otherwise, the index of first non-latin1 (> 0xff) character. 
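// Depending on CPU features, the code below uses an AVX-512 masked loop
// (AVX3Threshold == 0, AVX-512VL/BW, BMI2), an SSE4.2 vector loop, and/or
// a scalar loop for the tail.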
8930 // @IntrinsicCandidate 8931 // public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) { 8932 // for (int i = 0; i < len; i++) { 8933 // char c = src[srcOff]; 8934 // if (c > 0xff) { 8935 // return i; // return index of non-latin1 char 8936 // } 8937 // dst[dstOff] = (byte)c; 8938 // srcOff++; 8939 // dstOff++; 8940 // } 8941 // return len; 8942 // } 8943 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 8944 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 8945 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 8946 Register tmp5, Register result, KRegister mask1, KRegister mask2) { 8947 Label copy_chars_loop, done, reset_sp, copy_tail; 8948 8949 // rsi: src 8950 // rdi: dst 8951 // rdx: len 8952 // rcx: tmp5 8953 // rax: result 8954 8955 // rsi holds start addr of source char[] to be compressed 8956 // rdi holds start addr of destination byte[] 8957 // rdx holds length 8958 8959 assert(len != result, ""); 8960 8961 // save length for return 8962 movl(result, len); 8963 8964 if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512 8965 VM_Version::supports_avx512vlbw() && 8966 VM_Version::supports_bmi2()) { 8967 8968 Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail; 8969 8970 // alignment 8971 Label post_alignment; 8972 8973 // if length of the string is less than 32, handle it the old-fashioned way 8974 testl(len, -32); 8975 jcc(Assembler::zero, below_threshold); 8976 8977 // First check whether a character is compressible (<= 0xFF). 8978 // Create mask to test for Unicode chars inside zmm vector 8979 movl(tmp5, 0x00FF); 8980 evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit); 8981 8982 testl(len, -64); 8983 jccb(Assembler::zero, post_alignment); 8984 8985 movl(tmp5, dst); 8986 andl(tmp5, (32 - 1)); 8987 negl(tmp5); 8988 andl(tmp5, (32 - 1)); 8989 8990 // bail out when there is nothing to be done 8991 testl(tmp5, 0xFFFFFFFF); 8992 jccb(Assembler::zero, post_alignment); 8993 8994 // ~(~0 << len), where len is the # of remaining elements to process 8995 movl(len, 0xFFFFFFFF); 8996 shlxl(len, len, tmp5); 8997 notl(len); 8998 kmovdl(mask2, len); 8999 movl(len, result); 9000 9001 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 9002 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 9003 ktestd(mask1, mask2); 9004 jcc(Assembler::carryClear, copy_tail); 9005 9006 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 9007 9008 addptr(src, tmp5); 9009 addptr(src, tmp5); // src advances 2 bytes per char, so add tmp5 twice 9010 addptr(dst, tmp5); 9011 subl(len, tmp5); 9012 9013 bind(post_alignment); 9014 // end of alignment 9015 9016 movl(tmp5, len); 9017 andl(tmp5, (32 - 1)); // tail count (in chars) 9018 andl(len, ~(32 - 1)); // vector count (in chars) 9019 jccb(Assembler::zero, copy_loop_tail); 9020 9021 lea(src, Address(src, len, Address::times_2)); 9022 lea(dst, Address(dst, len, Address::times_1)); 9023 negptr(len); 9024 9025 bind(copy_32_loop); 9026 evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit); 9027 evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit); 9028 kortestdl(mask1, mask1); 9029 jccb(Assembler::carryClear, reset_for_copy_tail); 9030 9031 // All elements in the current processed chunk are valid candidates for 9032 // compression. Write truncated byte elements to memory.
9033 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit); 9034 addptr(len, 32); 9035 jccb(Assembler::notZero, copy_32_loop); 9036 9037 bind(copy_loop_tail); 9038 // bail out when there is nothing to be done 9039 testl(tmp5, 0xFFFFFFFF); 9040 jcc(Assembler::zero, done); 9041 9042 movl(len, tmp5); 9043 9044 // ~(~0 << len), where len is the # of remaining elements to process 9045 movl(tmp5, 0xFFFFFFFF); 9046 shlxl(tmp5, tmp5, len); 9047 notl(tmp5); 9048 9049 kmovdl(mask2, tmp5); 9050 9051 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 9052 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 9053 ktestd(mask1, mask2); 9054 jcc(Assembler::carryClear, copy_tail); 9055 9056 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 9057 jmp(done); 9058 9059 bind(reset_for_copy_tail); 9060 lea(src, Address(src, tmp5, Address::times_2)); 9061 lea(dst, Address(dst, tmp5, Address::times_1)); 9062 subptr(len, tmp5); 9063 jmp(copy_chars_loop); 9064 9065 bind(below_threshold); 9066 } 9067 9068 if (UseSSE42Intrinsics) { 9069 Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail; 9070 9071 // vectored compression 9072 testl(len, 0xfffffff8); 9073 jcc(Assembler::zero, copy_tail); 9074 9075 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors 9076 movdl(tmp1Reg, tmp5); 9077 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 9078 9079 andl(len, 0xfffffff0); 9080 jccb(Assembler::zero, copy_16); 9081 9082 // compress 16 chars per iter 9083 pxor(tmp4Reg, tmp4Reg); 9084 9085 lea(src, Address(src, len, Address::times_2)); 9086 lea(dst, Address(dst, len, Address::times_1)); 9087 negptr(len); 9088 9089 bind(copy_32_loop); 9090 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters 9091 por(tmp4Reg, tmp2Reg); 9092 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters 9093 por(tmp4Reg, tmp3Reg); 9094 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector 9095 jccb(Assembler::notZero, reset_for_copy_tail); 9096 packuswb(tmp2Reg, tmp3Reg); // only ASCII chars; compress each to 1 byte 9097 movdqu(Address(dst, len, Address::times_1), tmp2Reg); 9098 addptr(len, 16); 9099 jccb(Assembler::notZero, copy_32_loop); 9100 9101 // compress next vector of 8 chars (if any) 9102 bind(copy_16); 9103 // len = 0 9104 testl(result, 0x00000008); // check if there's a block of 8 chars to compress 9105 jccb(Assembler::zero, copy_tail_sse); 9106 9107 pxor(tmp3Reg, tmp3Reg); 9108 9109 movdqu(tmp2Reg, Address(src, 0)); 9110 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector 9111 jccb(Assembler::notZero, reset_for_copy_tail); 9112 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte 9113 movq(Address(dst, 0), tmp2Reg); 9114 addptr(src, 16); 9115 addptr(dst, 8); 9116 jmpb(copy_tail_sse); 9117 9118 bind(reset_for_copy_tail); 9119 movl(tmp5, result); 9120 andl(tmp5, 0x0000000f); 9121 lea(src, Address(src, tmp5, Address::times_2)); 9122 lea(dst, Address(dst, tmp5, Address::times_1)); 9123 subptr(len, tmp5); 9124 jmpb(copy_chars_loop); 9125 9126 bind(copy_tail_sse); 9127 movl(len, result); 9128 andl(len, 0x00000007); // tail count (in chars) 9129 } 9130 // compress 1 char per iter 9131 bind(copy_tail); 9132 testl(len, len); 9133 jccb(Assembler::zero, done); 9134 lea(src, Address(src, len, Address::times_2)); 9135 lea(dst, Address(dst, len, Address::times_1)); 9136 negptr(len); 9137 9138 
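// src and dst now point just past the end and len is negative, so the
// scaled-index addressing below walks forward through the arrays; the
// loop exits when len reaches zero.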
bind(copy_chars_loop); 9139 load_unsigned_short(tmp5, Address(src, len, Address::times_2)); 9140 testl(tmp5, 0xff00); // check if Unicode char 9141 jccb(Assembler::notZero, reset_sp); 9142 movb(Address(dst, len, Address::times_1), tmp5); // ASCII char; compress to 1 byte 9143 increment(len); 9144 jccb(Assembler::notZero, copy_chars_loop); 9145 9146 // add len then return (len will be zero if compress succeeded, otherwise negative) 9147 bind(reset_sp); 9148 addl(result, len); 9149 9150 bind(done); 9151 } 9152 9153 // Inflate byte[] array to char[]. 9154 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java 9155 // @IntrinsicCandidate 9156 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) { 9157 // for (int i = 0; i < len; i++) { 9158 // dst[dstOff++] = (char)(src[srcOff++] & 0xff); 9159 // } 9160 // } 9161 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 9162 XMMRegister tmp1, Register tmp2, KRegister mask) { 9163 Label copy_chars_loop, done, below_threshold, avx3_threshold; 9164 // rsi: src 9165 // rdi: dst 9166 // rdx: len 9167 // rcx: tmp2 9168 9169 // rsi holds start addr of source byte[] to be inflated 9170 // rdi holds start addr of destination char[] 9171 // rdx holds length 9172 assert_different_registers(src, dst, len, tmp2); 9173 movl(tmp2, len); 9174 if ((UseAVX > 2) && // AVX512 9175 VM_Version::supports_avx512vlbw() && 9176 VM_Version::supports_bmi2()) { 9177 9178 Label copy_32_loop, copy_tail; 9179 Register tmp3_aliased = len; 9180 9181 // if length of the string is less than 16, handle it in an old fashioned way 9182 testl(len, -16); 9183 jcc(Assembler::zero, below_threshold); 9184 9185 testl(len, -1 * AVX3Threshold); 9186 jcc(Assembler::zero, avx3_threshold); 9187 9188 // In order to use only one arithmetic operation for the main loop we use 9189 // this pre-calculation 9190 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop 9191 andl(len, -32); // vector count 9192 jccb(Assembler::zero, copy_tail); 9193 9194 lea(src, Address(src, len, Address::times_1)); 9195 lea(dst, Address(dst, len, Address::times_2)); 9196 negptr(len); 9197 9198 9199 // inflate 32 chars per iter 9200 bind(copy_32_loop); 9201 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit); 9202 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit); 9203 addptr(len, 32); 9204 jcc(Assembler::notZero, copy_32_loop); 9205 9206 bind(copy_tail); 9207 // bail out when there is nothing to be done 9208 testl(tmp2, -1); // we don't destroy the contents of tmp2 here 9209 jcc(Assembler::zero, done); 9210 9211 // ~(~0 << length), where length is the # of remaining elements to process 9212 movl(tmp3_aliased, -1); 9213 shlxl(tmp3_aliased, tmp3_aliased, tmp2); 9214 notl(tmp3_aliased); 9215 kmovdl(mask, tmp3_aliased); 9216 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit); 9217 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit); 9218 9219 jmp(done); 9220 bind(avx3_threshold); 9221 } 9222 if (UseSSE42Intrinsics) { 9223 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail; 9224 9225 if (UseAVX > 1) { 9226 andl(tmp2, (16 - 1)); 9227 andl(len, -16); 9228 jccb(Assembler::zero, copy_new_tail); 9229 } else { 9230 andl(tmp2, 0x00000007); // tail count (in chars) 9231 andl(len, 0xfffffff8); // vector count (in chars) 9232 jccb(Assembler::zero, copy_tail); 9233 } 9234 9235 // vectored inflation 9236 lea(src, Address(src, len, 
Address::times_1)); 9237 lea(dst, Address(dst, len, Address::times_2)); 9238 negptr(len); 9239 9240 if (UseAVX > 1) { 9241 bind(copy_16_loop); 9242 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit); 9243 vmovdqu(Address(dst, len, Address::times_2), tmp1); 9244 addptr(len, 16); 9245 jcc(Assembler::notZero, copy_16_loop); 9246 9247 bind(below_threshold); 9248 bind(copy_new_tail); 9249 movl(len, tmp2); 9250 andl(tmp2, 0x00000007); 9251 andl(len, 0xFFFFFFF8); 9252 jccb(Assembler::zero, copy_tail); 9253 9254 pmovzxbw(tmp1, Address(src, 0)); 9255 movdqu(Address(dst, 0), tmp1); 9256 addptr(src, 8); 9257 addptr(dst, 2 * 8); 9258 9259 jmp(copy_tail, true); 9260 } 9261 9262 // inflate 8 chars per iter 9263 bind(copy_8_loop); 9264 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words 9265 movdqu(Address(dst, len, Address::times_2), tmp1); 9266 addptr(len, 8); 9267 jcc(Assembler::notZero, copy_8_loop); 9268 9269 bind(copy_tail); 9270 movl(len, tmp2); 9271 9272 cmpl(len, 4); 9273 jccb(Assembler::less, copy_bytes); 9274 9275 movdl(tmp1, Address(src, 0)); // load 4 byte chars 9276 pmovzxbw(tmp1, tmp1); 9277 movq(Address(dst, 0), tmp1); 9278 subptr(len, 4); 9279 addptr(src, 4); 9280 addptr(dst, 8); 9281 9282 bind(copy_bytes); 9283 } else { 9284 bind(below_threshold); 9285 } 9286 9287 testl(len, len); 9288 jccb(Assembler::zero, done); 9289 lea(src, Address(src, len, Address::times_1)); 9290 lea(dst, Address(dst, len, Address::times_2)); 9291 negptr(len); 9292 9293 // inflate 1 char per iter 9294 bind(copy_chars_loop); 9295 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char 9296 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word 9297 increment(len); 9298 jcc(Assembler::notZero, copy_chars_loop); 9299 9300 bind(done); 9301 } 9302 9303 9304 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) { 9305 switch(type) { 9306 case T_BYTE: 9307 case T_BOOLEAN: 9308 evmovdqub(dst, kmask, src, merge, vector_len); 9309 break; 9310 case T_CHAR: 9311 case T_SHORT: 9312 evmovdquw(dst, kmask, src, merge, vector_len); 9313 break; 9314 case T_INT: 9315 case T_FLOAT: 9316 evmovdqul(dst, kmask, src, merge, vector_len); 9317 break; 9318 case T_LONG: 9319 case T_DOUBLE: 9320 evmovdquq(dst, kmask, src, merge, vector_len); 9321 break; 9322 default: 9323 fatal("Unexpected type argument %s", type2name(type)); 9324 break; 9325 } 9326 } 9327 9328 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) { 9329 switch(type) { 9330 case T_BYTE: 9331 case T_BOOLEAN: 9332 evmovdqub(dst, kmask, src, merge, vector_len); 9333 break; 9334 case T_CHAR: 9335 case T_SHORT: 9336 evmovdquw(dst, kmask, src, merge, vector_len); 9337 break; 9338 case T_INT: 9339 case T_FLOAT: 9340 evmovdqul(dst, kmask, src, merge, vector_len); 9341 break; 9342 case T_LONG: 9343 case T_DOUBLE: 9344 evmovdquq(dst, kmask, src, merge, vector_len); 9345 break; 9346 default: 9347 fatal("Unexpected type argument %s", type2name(type)); 9348 break; 9349 } 9350 } 9351 9352 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) { 9353 switch(masklen) { 9354 case 2: 9355 knotbl(dst, src); 9356 movl(rtmp, 3); 9357 kmovbl(ktmp, rtmp); 9358 kandbl(dst, ktmp, dst); 9359 break; 9360 case 4: 9361 knotbl(dst, src); 9362 movl(rtmp, 15); 9363 kmovbl(ktmp, rtmp); 9364 kandbl(dst, ktmp, dst); 9365 break; 9366 case 
8: 9367 knotbl(dst, src); 9368 break; 9369 case 16: 9370 knotwl(dst, src); 9371 break; 9372 case 32: 9373 knotdl(dst, src); 9374 break; 9375 case 64: 9376 knotql(dst, src); 9377 break; 9378 default: 9379 fatal("Unexpected vector length %d", masklen); 9380 break; 9381 } 9382 } 9383 9384 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9385 switch(type) { 9386 case T_BOOLEAN: 9387 case T_BYTE: 9388 kandbl(dst, src1, src2); 9389 break; 9390 case T_CHAR: 9391 case T_SHORT: 9392 kandwl(dst, src1, src2); 9393 break; 9394 case T_INT: 9395 case T_FLOAT: 9396 kanddl(dst, src1, src2); 9397 break; 9398 case T_LONG: 9399 case T_DOUBLE: 9400 kandql(dst, src1, src2); 9401 break; 9402 default: 9403 fatal("Unexpected type argument %s", type2name(type)); 9404 break; 9405 } 9406 } 9407 9408 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9409 switch(type) { 9410 case T_BOOLEAN: 9411 case T_BYTE: 9412 korbl(dst, src1, src2); 9413 break; 9414 case T_CHAR: 9415 case T_SHORT: 9416 korwl(dst, src1, src2); 9417 break; 9418 case T_INT: 9419 case T_FLOAT: 9420 kordl(dst, src1, src2); 9421 break; 9422 case T_LONG: 9423 case T_DOUBLE: 9424 korql(dst, src1, src2); 9425 break; 9426 default: 9427 fatal("Unexpected type argument %s", type2name(type)); 9428 break; 9429 } 9430 } 9431 9432 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9433 switch(type) { 9434 case T_BOOLEAN: 9435 case T_BYTE: 9436 kxorbl(dst, src1, src2); 9437 break; 9438 case T_CHAR: 9439 case T_SHORT: 9440 kxorwl(dst, src1, src2); 9441 break; 9442 case T_INT: 9443 case T_FLOAT: 9444 kxordl(dst, src1, src2); 9445 break; 9446 case T_LONG: 9447 case T_DOUBLE: 9448 kxorql(dst, src1, src2); 9449 break; 9450 default: 9451 fatal("Unexpected type argument %s", type2name(type)); 9452 break; 9453 } 9454 } 9455 9456 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9457 switch(type) { 9458 case T_BOOLEAN: 9459 case T_BYTE: 9460 evpermb(dst, mask, nds, src, merge, vector_len); break; 9461 case T_CHAR: 9462 case T_SHORT: 9463 evpermw(dst, mask, nds, src, merge, vector_len); break; 9464 case T_INT: 9465 case T_FLOAT: 9466 evpermd(dst, mask, nds, src, merge, vector_len); break; 9467 case T_LONG: 9468 case T_DOUBLE: 9469 evpermq(dst, mask, nds, src, merge, vector_len); break; 9470 default: 9471 fatal("Unexpected type argument %s", type2name(type)); break; 9472 } 9473 } 9474 9475 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9476 switch(type) { 9477 case T_BOOLEAN: 9478 case T_BYTE: 9479 evpermb(dst, mask, nds, src, merge, vector_len); break; 9480 case T_CHAR: 9481 case T_SHORT: 9482 evpermw(dst, mask, nds, src, merge, vector_len); break; 9483 case T_INT: 9484 case T_FLOAT: 9485 evpermd(dst, mask, nds, src, merge, vector_len); break; 9486 case T_LONG: 9487 case T_DOUBLE: 9488 evpermq(dst, mask, nds, src, merge, vector_len); break; 9489 default: 9490 fatal("Unexpected type argument %s", type2name(type)); break; 9491 } 9492 } 9493 9494 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9495 switch(type) { 9496 case T_BYTE: 9497 evpminsb(dst, mask, nds, src, merge, vector_len); break; 9498 case T_SHORT: 9499 evpminsw(dst, mask, nds, src, merge, vector_len); break; 9500 case T_INT: 
9501 evpminsd(dst, mask, nds, src, merge, vector_len); break; 9502 case T_LONG: 9503 evpminsq(dst, mask, nds, src, merge, vector_len); break; 9504 default: 9505 fatal("Unexpected type argument %s", type2name(type)); break; 9506 } 9507 } 9508 9509 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9510 switch(type) { 9511 case T_BYTE: 9512 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 9513 case T_SHORT: 9514 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 9515 case T_INT: 9516 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 9517 case T_LONG: 9518 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 9519 default: 9520 fatal("Unexpected type argument %s", type2name(type)); break; 9521 } 9522 } 9523 9524 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9525 switch(type) { 9526 case T_BYTE: 9527 evpminsb(dst, mask, nds, src, merge, vector_len); break; 9528 case T_SHORT: 9529 evpminsw(dst, mask, nds, src, merge, vector_len); break; 9530 case T_INT: 9531 evpminsd(dst, mask, nds, src, merge, vector_len); break; 9532 case T_LONG: 9533 evpminsq(dst, mask, nds, src, merge, vector_len); break; 9534 default: 9535 fatal("Unexpected type argument %s", type2name(type)); break; 9536 } 9537 } 9538 9539 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9540 switch(type) { 9541 case T_BYTE: 9542 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 9543 case T_SHORT: 9544 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 9545 case T_INT: 9546 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 9547 case T_LONG: 9548 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 9549 default: 9550 fatal("Unexpected type argument %s", type2name(type)); break; 9551 } 9552 } 9553 9554 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9555 switch(type) { 9556 case T_INT: 9557 evpxord(dst, mask, nds, src, merge, vector_len); break; 9558 case T_LONG: 9559 evpxorq(dst, mask, nds, src, merge, vector_len); break; 9560 default: 9561 fatal("Unexpected type argument %s", type2name(type)); break; 9562 } 9563 } 9564 9565 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9566 switch(type) { 9567 case T_INT: 9568 evpxord(dst, mask, nds, src, merge, vector_len); break; 9569 case T_LONG: 9570 evpxorq(dst, mask, nds, src, merge, vector_len); break; 9571 default: 9572 fatal("Unexpected type argument %s", type2name(type)); break; 9573 } 9574 } 9575 9576 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9577 switch(type) { 9578 case T_INT: 9579 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 9580 case T_LONG: 9581 evporq(dst, mask, nds, src, merge, vector_len); break; 9582 default: 9583 fatal("Unexpected type argument %s", type2name(type)); break; 9584 } 9585 } 9586 9587 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9588 switch(type) { 9589 case T_INT: 9590 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 9591 case T_LONG: 9592 evporq(dst, 
mask, nds, src, merge, vector_len); break; 9593 default: 9594 fatal("Unexpected type argument %s", type2name(type)); break; 9595 } 9596 } 9597 9598 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9599 switch(type) { 9600 case T_INT: 9601 evpandd(dst, mask, nds, src, merge, vector_len); break; 9602 case T_LONG: 9603 evpandq(dst, mask, nds, src, merge, vector_len); break; 9604 default: 9605 fatal("Unexpected type argument %s", type2name(type)); break; 9606 } 9607 } 9608 9609 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9610 switch(type) { 9611 case T_INT: 9612 evpandd(dst, mask, nds, src, merge, vector_len); break; 9613 case T_LONG: 9614 evpandq(dst, mask, nds, src, merge, vector_len); break; 9615 default: 9616 fatal("Unexpected type argument %s", type2name(type)); break; 9617 } 9618 } 9619 9620 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) { 9621 switch(masklen) { 9622 case 8: 9623 kortestbl(src1, src2); 9624 break; 9625 case 16: 9626 kortestwl(src1, src2); 9627 break; 9628 case 32: 9629 kortestdl(src1, src2); 9630 break; 9631 case 64: 9632 kortestql(src1, src2); 9633 break; 9634 default: 9635 fatal("Unexpected mask length %d", masklen); 9636 break; 9637 } 9638 } 9639 9640 9641 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) { 9642 switch(masklen) { 9643 case 8: 9644 ktestbl(src1, src2); 9645 break; 9646 case 16: 9647 ktestwl(src1, src2); 9648 break; 9649 case 32: 9650 ktestdl(src1, src2); 9651 break; 9652 case 64: 9653 ktestql(src1, src2); 9654 break; 9655 default: 9656 fatal("Unexpected mask length %d", masklen); 9657 break; 9658 } 9659 } 9660 9661 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 9662 switch(type) { 9663 case T_INT: 9664 evprold(dst, mask, src, shift, merge, vlen_enc); break; 9665 case T_LONG: 9666 evprolq(dst, mask, src, shift, merge, vlen_enc); break; 9667 default: 9668 fatal("Unexpected type argument %s", type2name(type)); break; 9669 break; 9670 } 9671 } 9672 9673 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 9674 switch(type) { 9675 case T_INT: 9676 evprord(dst, mask, src, shift, merge, vlen_enc); break; 9677 case T_LONG: 9678 evprorq(dst, mask, src, shift, merge, vlen_enc); break; 9679 default: 9680 fatal("Unexpected type argument %s", type2name(type)); break; 9681 } 9682 } 9683 9684 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 9685 switch(type) { 9686 case T_INT: 9687 evprolvd(dst, mask, src1, src2, merge, vlen_enc); break; 9688 case T_LONG: 9689 evprolvq(dst, mask, src1, src2, merge, vlen_enc); break; 9690 default: 9691 fatal("Unexpected type argument %s", type2name(type)); break; 9692 } 9693 } 9694 9695 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 9696 switch(type) { 9697 case T_INT: 9698 evprorvd(dst, mask, src1, src2, merge, vlen_enc); break; 9699 case T_LONG: 9700 evprorvq(dst, mask, src1, src2, merge, vlen_enc); break; 9701 default: 9702 fatal("Unexpected type argument %s", type2name(type)); break; 9703 } 9704 } 9705 9706 void MacroAssembler::evpandq(XMMRegister dst, 
XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9707 assert(rscratch != noreg || always_reachable(src), "missing"); 9708 9709 if (reachable(src)) { 9710 evpandq(dst, nds, as_Address(src), vector_len); 9711 } else { 9712 lea(rscratch, src); 9713 evpandq(dst, nds, Address(rscratch, 0), vector_len); 9714 } 9715 } 9716 9717 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 9718 assert(rscratch != noreg || always_reachable(src), "missing"); 9719 9720 if (reachable(src)) { 9721 Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len); 9722 } else { 9723 lea(rscratch, src); 9724 Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 9725 } 9726 } 9727 9728 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9729 assert(rscratch != noreg || always_reachable(src), "missing"); 9730 9731 if (reachable(src)) { 9732 evporq(dst, nds, as_Address(src), vector_len); 9733 } else { 9734 lea(rscratch, src); 9735 evporq(dst, nds, Address(rscratch, 0), vector_len); 9736 } 9737 } 9738 9739 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9740 assert(rscratch != noreg || always_reachable(src), "missing"); 9741 9742 if (reachable(src)) { 9743 vpshufb(dst, nds, as_Address(src), vector_len); 9744 } else { 9745 lea(rscratch, src); 9746 vpshufb(dst, nds, Address(rscratch, 0), vector_len); 9747 } 9748 } 9749 9750 void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9751 assert(rscratch != noreg || always_reachable(src), "missing"); 9752 9753 if (reachable(src)) { 9754 Assembler::vpor(dst, nds, as_Address(src), vector_len); 9755 } else { 9756 lea(rscratch, src); 9757 Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len); 9758 } 9759 } 9760 9761 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) { 9762 assert(rscratch != noreg || always_reachable(src3), "missing"); 9763 9764 if (reachable(src3)) { 9765 vpternlogq(dst, imm8, src2, as_Address(src3), vector_len); 9766 } else { 9767 lea(rscratch, src3); 9768 vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len); 9769 } 9770 } 9771 9772 #if COMPILER2_OR_JVMCI 9773 9774 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 9775 Register length, Register temp, int vec_enc) { 9776 // Computing mask for predicated vector store. 9777 movptr(temp, -1); 9778 bzhiq(temp, temp, length); 9779 kmov(mask, temp); 9780 evmovdqu(bt, mask, dst, xmm, true, vec_enc); 9781 } 9782 9783 // Set memory operation for length "less than" 64 bytes. 
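// Without a 64-byte vector this emits an unmasked 32-byte fill followed by
// a masked 32-byte fill for the remainder; with use64byteVector a single
// masked 64-byte store covers the whole range.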
9784 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp, 9785 XMMRegister xmm, KRegister mask, Register length, 9786 Register temp, bool use64byteVector) { 9787 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9788 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 9789 if (!use64byteVector) { 9790 fill32(dst, disp, xmm); 9791 subptr(length, 32 >> shift); 9792 fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp); 9793 } else { 9794 assert(MaxVectorSize == 64, "vector length != 64"); 9795 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit); 9796 } 9797 } 9798 9799 9800 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp, 9801 XMMRegister xmm, KRegister mask, Register length, 9802 Register temp) { 9803 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9804 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 9805 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit); 9806 } 9807 9808 9809 void MacroAssembler::fill32(Address dst, XMMRegister xmm) { 9810 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9811 vmovdqu(dst, xmm); 9812 } 9813 9814 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) { 9815 fill32(Address(dst, disp), xmm); 9816 } 9817 9818 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) { 9819 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9820 if (!use64byteVector) { 9821 fill32(dst, xmm); 9822 fill32(dst.plus_disp(32), xmm); 9823 } else { 9824 evmovdquq(dst, xmm, Assembler::AVX_512bit); 9825 } 9826 } 9827 9828 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) { 9829 fill64(Address(dst, disp), xmm, use64byteVector); 9830 } 9831 9832 #ifdef _LP64 9833 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value, 9834 Register count, Register rtmp, XMMRegister xtmp) { 9835 Label L_exit; 9836 Label L_fill_start; 9837 Label L_fill_64_bytes; 9838 Label L_fill_96_bytes; 9839 Label L_fill_128_bytes; 9840 Label L_fill_128_bytes_loop; 9841 Label L_fill_128_loop_header; 9842 Label L_fill_128_bytes_loop_header; 9843 Label L_fill_128_bytes_loop_pre_header; 9844 Label L_fill_zmm_sequence; 9845 9846 int shift = -1; 9847 int avx3threshold = VM_Version::avx3_threshold(); 9848 switch(type) { 9849 case T_BYTE: shift = 0; 9850 break; 9851 case T_SHORT: shift = 1; 9852 break; 9853 case T_INT: shift = 2; 9854 break; 9855 /* Uncomment when LONG fill stubs are supported. 
9856 case T_LONG: shift = 3; 9857 break; 9858 */ 9859 default: 9860 fatal("Unhandled type: %s\n", type2name(type)); 9861 } 9862 9863 if ((avx3threshold != 0) || (MaxVectorSize == 32)) { 9864 9865 if (MaxVectorSize == 64) { 9866 cmpq(count, avx3threshold >> shift); 9867 jcc(Assembler::greater, L_fill_zmm_sequence); 9868 } 9869 9870 evpbroadcast(type, xtmp, value, Assembler::AVX_256bit); 9871 9872 bind(L_fill_start); 9873 9874 cmpq(count, 32 >> shift); 9875 jccb(Assembler::greater, L_fill_64_bytes); 9876 fill32_masked(shift, to, 0, xtmp, k2, count, rtmp); 9877 jmp(L_exit); 9878 9879 bind(L_fill_64_bytes); 9880 cmpq(count, 64 >> shift); 9881 jccb(Assembler::greater, L_fill_96_bytes); 9882 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp); 9883 jmp(L_exit); 9884 9885 bind(L_fill_96_bytes); 9886 cmpq(count, 96 >> shift); 9887 jccb(Assembler::greater, L_fill_128_bytes); 9888 fill64(to, 0, xtmp); 9889 subq(count, 64 >> shift); 9890 fill32_masked(shift, to, 64, xtmp, k2, count, rtmp); 9891 jmp(L_exit); 9892 9893 bind(L_fill_128_bytes); 9894 cmpq(count, 128 >> shift); 9895 jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header); 9896 fill64(to, 0, xtmp); 9897 fill32(to, 64, xtmp); 9898 subq(count, 96 >> shift); 9899 fill32_masked(shift, to, 96, xtmp, k2, count, rtmp); 9900 jmp(L_exit); 9901 9902 bind(L_fill_128_bytes_loop_pre_header); 9903 { 9904 mov(rtmp, to); 9905 andq(rtmp, 31); 9906 jccb(Assembler::zero, L_fill_128_bytes_loop_header); 9907 negq(rtmp); 9908 addq(rtmp, 32); 9909 mov64(r8, -1L); 9910 bzhiq(r8, r8, rtmp); 9911 kmovql(k2, r8); 9912 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit); 9913 addq(to, rtmp); 9914 shrq(rtmp, shift); 9915 subq(count, rtmp); 9916 } 9917 9918 cmpq(count, 128 >> shift); 9919 jcc(Assembler::less, L_fill_start); 9920 9921 bind(L_fill_128_bytes_loop_header); 9922 subq(count, 128 >> shift); 9923 9924 align32(); 9925 bind(L_fill_128_bytes_loop); 9926 fill64(to, 0, xtmp); 9927 fill64(to, 64, xtmp); 9928 addq(to, 128); 9929 subq(count, 128 >> shift); 9930 jccb(Assembler::greaterEqual, L_fill_128_bytes_loop); 9931 9932 addq(count, 128 >> shift); 9933 jcc(Assembler::zero, L_exit); 9934 jmp(L_fill_start); 9935 } 9936 9937 if (MaxVectorSize == 64) { 9938 // Sequence using 64 byte ZMM register. 
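// Entered only when MaxVectorSize == 64 and count exceeds the AVX3
// threshold (see the cmpq/jcc at the top of this function), so short
// fills keep to the 32-byte YMM path above.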
9939 Label L_fill_128_bytes_zmm; 9940 Label L_fill_192_bytes_zmm; 9941 Label L_fill_192_bytes_loop_zmm; 9942 Label L_fill_192_bytes_loop_header_zmm; 9943 Label L_fill_192_bytes_loop_pre_header_zmm; 9944 Label L_fill_start_zmm_sequence; 9945 9946 bind(L_fill_zmm_sequence); 9947 evpbroadcast(type, xtmp, value, Assembler::AVX_512bit); 9948 9949 bind(L_fill_start_zmm_sequence); 9950 cmpq(count, 64 >> shift); 9951 jccb(Assembler::greater, L_fill_128_bytes_zmm); 9952 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true); 9953 jmp(L_exit); 9954 9955 bind(L_fill_128_bytes_zmm); 9956 cmpq(count, 128 >> shift); 9957 jccb(Assembler::greater, L_fill_192_bytes_zmm); 9958 fill64(to, 0, xtmp, true); 9959 subq(count, 64 >> shift); 9960 fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true); 9961 jmp(L_exit); 9962 9963 bind(L_fill_192_bytes_zmm); 9964 cmpq(count, 192 >> shift); 9965 jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm); 9966 fill64(to, 0, xtmp, true); 9967 fill64(to, 64, xtmp, true); 9968 subq(count, 128 >> shift); 9969 fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true); 9970 jmp(L_exit); 9971 9972 bind(L_fill_192_bytes_loop_pre_header_zmm); 9973 { 9974 movq(rtmp, to); 9975 andq(rtmp, 63); 9976 jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm); 9977 negq(rtmp); 9978 addq(rtmp, 64); 9979 mov64(r8, -1L); 9980 bzhiq(r8, r8, rtmp); 9981 kmovql(k2, r8); 9982 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit); 9983 addq(to, rtmp); 9984 shrq(rtmp, shift); 9985 subq(count, rtmp); 9986 } 9987 9988 cmpq(count, 192 >> shift); 9989 jcc(Assembler::less, L_fill_start_zmm_sequence); 9990 9991 bind(L_fill_192_bytes_loop_header_zmm); 9992 subq(count, 192 >> shift); 9993 9994 align32(); 9995 bind(L_fill_192_bytes_loop_zmm); 9996 fill64(to, 0, xtmp, true); 9997 fill64(to, 64, xtmp, true); 9998 fill64(to, 128, xtmp, true); 9999 addq(to, 192); 10000 subq(count, 192 >> shift); 10001 jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm); 10002 10003 addq(count, 192 >> shift); 10004 jcc(Assembler::zero, L_exit); 10005 jmp(L_fill_start_zmm_sequence); 10006 } 10007 bind(L_exit); 10008 } 10009 #endif 10010 #endif //COMPILER2_OR_JVMCI 10011 10012 10013 #ifdef _LP64 10014 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) { 10015 Label done; 10016 cvttss2sil(dst, src); 10017 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 10018 cmpl(dst, 0x80000000); // float_sign_flip 10019 jccb(Assembler::notEqual, done); 10020 subptr(rsp, 8); 10021 movflt(Address(rsp, 0), src); 10022 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup()))); 10023 pop(dst); 10024 bind(done); 10025 } 10026 10027 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) { 10028 Label done; 10029 cvttsd2sil(dst, src); 10030 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 10031 cmpl(dst, 0x80000000); // float_sign_flip 10032 jccb(Assembler::notEqual, done); 10033 subptr(rsp, 8); 10034 movdbl(Address(rsp, 0), src); 10035 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup()))); 10036 pop(dst); 10037 bind(done); 10038 } 10039 10040 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) { 10041 Label done; 10042 cvttss2siq(dst, src); 10043 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); 10044 jccb(Assembler::notEqual, done); 10045 subptr(rsp, 8); 10046 movflt(Address(rsp, 0), src); 10047 
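// (the fixup stub is expected to consume the float spilled above and leave
// the JLS-correct result on the stack for the pop below)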
call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup()))); 10048 pop(dst); 10049 bind(done); 10050 } 10051 10052 void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) { 10053 // The following code is a line-by-line assembly translation of the rounding algorithm. 10054 // Please refer to the java.lang.Math.round(float) algorithm for details. 10055 const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000; 10056 const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24; 10057 const int32_t FloatConsts_EXP_BIAS = 127; 10058 const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF; 10059 const int32_t MINUS_32 = 0xFFFFFFE0; 10060 Label L_special_case, L_block1, L_exit; 10061 movl(rtmp, FloatConsts_EXP_BIT_MASK); 10062 movdl(dst, src); 10063 andl(dst, rtmp); 10064 sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1); 10065 movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS); 10066 subl(rtmp, dst); 10067 movl(rcx, rtmp); 10068 movl(dst, MINUS_32); 10069 testl(rtmp, dst); 10070 jccb(Assembler::notEqual, L_special_case); 10071 movdl(dst, src); 10072 andl(dst, FloatConsts_SIGNIF_BIT_MASK); 10073 orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1); 10074 movdl(rtmp, src); 10075 testl(rtmp, rtmp); 10076 jccb(Assembler::greaterEqual, L_block1); 10077 negl(dst); 10078 bind(L_block1); 10079 sarl(dst); 10080 addl(dst, 0x1); 10081 sarl(dst, 0x1); 10082 jmp(L_exit); 10083 bind(L_special_case); 10084 convert_f2i(dst, src); 10085 bind(L_exit); 10086 } 10087 10088 void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) { 10089 // The following code is a line-by-line assembly translation of the rounding algorithm. 10090 // Please refer to the java.lang.Math.round(double) algorithm for details. 10091 const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L; 10092 const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53; 10093 const int64_t DoubleConsts_EXP_BIAS = 1023; 10094 const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL; 10095 const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L; 10096 Label L_special_case, L_block1, L_exit; 10097 mov64(rtmp, DoubleConsts_EXP_BIT_MASK); 10098 movq(dst, src); 10099 andq(dst, rtmp); 10100 sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1); 10101 mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS); 10102 subq(rtmp, dst); 10103 movq(rcx, rtmp); 10104 mov64(dst, MINUS_64); 10105 testq(rtmp, dst); 10106 jccb(Assembler::notEqual, L_special_case); 10107 movq(dst, src); 10108 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK); 10109 andq(dst, rtmp); 10110 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1); 10111 orq(dst, rtmp); 10112 movq(rtmp, src); 10113 testq(rtmp, rtmp); 10114 jccb(Assembler::greaterEqual, L_block1); 10115 negq(dst); 10116 bind(L_block1); 10117 sarq(dst); 10118 addq(dst, 0x1); 10119 sarq(dst, 0x1); 10120 jmp(L_exit); 10121 bind(L_special_case); 10122 convert_d2l(dst, src); 10123 bind(L_exit); 10124 } 10125 10126 void MacroAssembler::convert_d2l(Register dst, XMMRegister src) { 10127 Label done; 10128 cvttsd2siq(dst, src); 10129 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); 10130 jccb(Assembler::notEqual, done); 10131 subptr(rsp, 8); 10132 movdbl(Address(rsp, 0), src); 10133 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup()))); 10134 pop(dst); 10135 bind(done); 10136 } 10137 10138 void MacroAssembler::cache_wb(Address line) 10139 { 10140 // 64-bit CPUs always support clflush 10141 assert(VM_Version::supports_clflush(), "clflush should be available"); 
10142 bool optimized = VM_Version::supports_clflushopt(); 10143 bool no_evict = VM_Version::supports_clwb(); 10144 10145 // prefer clwb (writeback without evict); otherwise 10146 // prefer clflushopt (potentially parallel writeback with evict); 10147 // otherwise fall back on clflush (serial writeback with evict) 10148 10149 if (optimized) { 10150 if (no_evict) { 10151 clwb(line); 10152 } else { 10153 clflushopt(line); 10154 } 10155 } else { 10156 // no need for fence when using CLFLUSH 10157 clflush(line); 10158 } 10159 } 10160 10161 void MacroAssembler::cache_wbsync(bool is_pre) 10162 { 10163 assert(VM_Version::supports_clflush(), "clflush should be available"); 10164 bool optimized = VM_Version::supports_clflushopt(); 10165 bool no_evict = VM_Version::supports_clwb(); 10166 10167 // pick the correct implementation 10168 10169 if (!is_pre && (optimized || no_evict)) { 10170 // need an sfence for post flush when using clflushopt or clwb 10171 // otherwise no need for any synchronization 10172 10173 sfence(); 10174 } 10175 } 10176 10177 #endif // _LP64 10178 10179 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { 10180 switch (cond) { 10181 // Note some conditions are synonyms for others 10182 case Assembler::zero: return Assembler::notZero; 10183 case Assembler::notZero: return Assembler::zero; 10184 case Assembler::less: return Assembler::greaterEqual; 10185 case Assembler::lessEqual: return Assembler::greater; 10186 case Assembler::greater: return Assembler::lessEqual; 10187 case Assembler::greaterEqual: return Assembler::less; 10188 case Assembler::below: return Assembler::aboveEqual; 10189 case Assembler::belowEqual: return Assembler::above; 10190 case Assembler::above: return Assembler::belowEqual; 10191 case Assembler::aboveEqual: return Assembler::below; 10192 case Assembler::overflow: return Assembler::noOverflow; 10193 case Assembler::noOverflow: return Assembler::overflow; 10194 case Assembler::negative: return Assembler::positive; 10195 case Assembler::positive: return Assembler::negative; 10196 case Assembler::parity: return Assembler::noParity; 10197 case Assembler::noParity: return Assembler::parity; 10198 } 10199 ShouldNotReachHere(); return Assembler::overflow; 10200 } 10201 10202 SkipIfEqual::SkipIfEqual( 10203 MacroAssembler* masm, const bool* flag_addr, bool value, Register rscratch) { 10204 _masm = masm; 10205 _masm->cmp8(ExternalAddress((address)flag_addr), value, rscratch); 10206 _masm->jcc(Assembler::equal, _label); 10207 } 10208 10209 SkipIfEqual::~SkipIfEqual() { 10210 _masm->bind(_label); 10211 } 10212 10213 // 32-bit Windows has its own fast-path implementation 10214 // of get_thread 10215 #if !defined(WIN32) || defined(_LP64) 10216 10217 // This is simply a call to Thread::current() 10218 void MacroAssembler::get_thread(Register thread) { 10219 if (thread != rax) { 10220 push(rax); 10221 } 10222 LP64_ONLY(push(rdi);) 10223 LP64_ONLY(push(rsi);) 10224 push(rdx); 10225 push(rcx); 10226 #ifdef _LP64 10227 push(r8); 10228 push(r9); 10229 push(r10); 10230 push(r11); 10231 #endif 10232 10233 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0); 10234 10235 #ifdef _LP64 10236 pop(r11); 10237 pop(r10); 10238 pop(r9); 10239 pop(r8); 10240 #endif 10241 pop(rcx); 10242 pop(rdx); 10243 LP64_ONLY(pop(rsi);) 10244 LP64_ONLY(pop(rdi);) 10245 if (thread != rax) { 10246 mov(thread, rax); 10247 pop(rax); 10248 } 10249 } 10250 10251 10252 #endif // !WIN32 || _LP64 10253 10254 void 

// 32-bit Windows has its own fast-path implementation
// of get_thread
#if !defined(WIN32) || defined(_LP64)

// This is simply a call to Thread::current()
void MacroAssembler::get_thread(Register thread) {
  if (thread != rax) {
    push(rax);
  }
  LP64_ONLY(push(rdi);)
  LP64_ONLY(push(rsi);)
  push(rdx);
  push(rcx);
#ifdef _LP64
  push(r8);
  push(r9);
  push(r10);
  push(r11);
#endif

  MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);

#ifdef _LP64
  pop(r11);
  pop(r10);
  pop(r9);
  pop(r8);
#endif
  pop(rcx);
  pop(rdx);
  LP64_ONLY(pop(rsi);)
  LP64_ONLY(pop(rdi);)
  if (thread != rax) {
    mov(thread, rax);
    pop(rax);
  }
}

#endif // !WIN32 || _LP64

void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
  Label L_stack_ok;
  if (bias == 0) {
    testptr(sp, 2 * wordSize - 1);
  } else {
    // lea(tmp, Address(rsp, bias));
    mov(tmp, sp);
    addptr(tmp, bias);
    testptr(tmp, 2 * wordSize - 1);
  }
  jcc(Assembler::equal, L_stack_ok);
  block_comment(msg);
  stop(msg);
  bind(L_stack_ok);
}

// Implements lightweight-locking.
//
// obj: the object to be locked
// reg_rax: rax
// thread: the thread which attempts to lock obj
// tmp: a temporary register
void MacroAssembler::lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
  assert(reg_rax == rax, "");
  assert_different_registers(obj, reg_rax, thread, tmp);

  Label push;
  const Register top = tmp;

  // Preload the markWord. It is important that this is the first
  // instruction emitted as it is part of C1's null check semantics.
  movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));

  // Load top.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));

  // Check if the lock-stack is full.
  cmpl(top, LockStack::end_offset());
  jcc(Assembler::greaterEqual, slow);

  // Check for recursion.
  cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
  jcc(Assembler::equal, push);

  // Check header for monitor (0b10).
  testptr(reg_rax, markWord::monitor_value);
  jcc(Assembler::notZero, slow);

  // Try to lock. Transition lock bits 0b01 => 0b00
  movptr(tmp, reg_rax);
  andptr(tmp, ~(int32_t)markWord::unlocked_value);
  orptr(reg_rax, markWord::unlocked_value);
  lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  jcc(Assembler::notEqual, slow);

  // Restore top, the CAS clobbered that register.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));

  bind(push);
  // After successful lock, push object on lock-stack.
  movptr(Address(thread, top), obj);
  incrementl(top, oopSize);
  movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
}
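
// Lock bits in the mark word, as used by the lightweight-locking fast paths
// above and below (summary only; markWord.hpp is authoritative):
//
//   0b01 (markWord::unlocked_value) - unlocked
//   0b00                            - fast-locked; owner found on a lock-stack
//   0b10 (markWord::monitor_value)  - inflated; mark carries the ObjectMonitor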

// Implements lightweight-unlocking.
//
// obj: the object to be unlocked
// reg_rax: rax
// thread: the thread
// tmp: a temporary register
//
// x86_32 Note: reg_rax and thread may alias each other due to limited register
// availability.
void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
  assert(reg_rax == rax, "");
  assert_different_registers(obj, reg_rax, tmp);
  LP64_ONLY(assert_different_registers(obj, reg_rax, thread, tmp);)

  Label unlocked, push_and_slow;
  const Register top = tmp;

  // Check if obj is top of lock-stack.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
  cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
  jcc(Assembler::notEqual, slow);

  // Pop lock-stack.
  DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
  subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);

  // Check if recursive.
  cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
  jcc(Assembler::equal, unlocked);

  // Not recursive. Check header for monitor (0b10).
  movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
  testptr(reg_rax, markWord::monitor_value);
  jcc(Assembler::notZero, push_and_slow);

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  testptr(reg_rax, markWord::unlocked_value);
  jcc(Assembler::zero, not_unlocked);
  stop("lightweight_unlock already unlocked");
  bind(not_unlocked);
#endif

  // Try to unlock. Transition lock bits 0b00 => 0b01
  movptr(tmp, reg_rax);
  orptr(tmp, markWord::unlocked_value);
  lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  jcc(Assembler::equal, unlocked);

  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
  if (thread == reg_rax) {
    // On x86_32 we may lose the thread.
    get_thread(thread);
  }
#ifdef ASSERT
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
  movptr(Address(thread, top), obj);
#endif
  addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
  jmp(slow);

  bind(unlocked);
}
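
// Pseudocode summary of the two fast paths above (documentation only;
// slow-path details elided):
//
//   lock(obj):   if lock-stack full          -> slow
//                if top-of-stack == obj      -> push(obj)     // recursive
//                if mark has monitor bit     -> slow
//                CAS mark 0b01 -> 0b00, else -> slow; push(obj)
//
//   unlock(obj): if top-of-stack != obj      -> slow
//                pop(); if new top == obj    -> done          // recursive
//                if mark has monitor bit     -> re-push, slow
//                CAS mark 0b00 -> 0b01, else -> re-push, slow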