/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "ci/ciInlineKlass.hpp"
#include "crc32c.h"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature_cc.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER2
#include "opto/output.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

static const Assembler::Condition reverse[] = {
    Assembler::noOverflow   /* overflow      = 0x0 */ ,
    Assembler::overflow     /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual   /* carrySet      = 0x2, below         = 0x2 */ ,
    Assembler::below        /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
    Assembler::notZero      /* zero          = 0x4, equal         = 0x4 */ ,
    Assembler::zero         /* notZero       = 0x5, notEqual      = 0x5 */ ,
    Assembler::above        /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual   /* above         = 0x7 */ ,
    Assembler::positive     /* negative      = 0x8 */ ,
    Assembler::negative     /* positive      = 0x9 */ ,
    Assembler::noParity     /* parity        = 0xa */ ,
    Assembler::parity       /* noParity      = 0xb */ ,
    Assembler::greaterEqual /* less          = 0xc */ ,
    Assembler::less         /* greaterEqual  = 0xd */ ,
    Assembler::greater      /* lessEqual     = 0xe */ ,
    Assembler::lessEqual    /* greater       = 0xf, */
};


// Implementation of MacroAssembler

// First all the versions that have distinct versions depending on 32/64 bit
// Unless the difference is trivial (1 line or so).

#ifndef _LP64

// 32bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}

Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  assert(rscratch == noreg, "");
  return Address::make_array(adr);
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments) {
  call(RuntimeAddress(entry_point));
  increment(rsp, number_of_arguments * wordSize);
}

void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Address src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Register src1, jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::extend_sign(Register hi, Register lo) {
  // According to Intel Doc. AP-526, "Integer Divide", p.18.
  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
    cdql();
  } else {
    movl(hi, lo);
    sarl(hi, 31);
  }
}

void MacroAssembler::jC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::parity, L);
}

void MacroAssembler::jnC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::noParity, L);
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  assert(rscratch == noreg, "not needed");
  jmp(as_Address(entry, noreg));
}

// Note: y_lo will be destroyed
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  // Long compare for Java (semantics as described in JVM spec.)
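  // Result in x_hi: -1 if x < y, 0 if x == y, +1 if x > y.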
  Label high, low, done;

  cmpl(x_hi, y_hi);
  jcc(Assembler::less, low);
  jcc(Assembler::greater, high);
  // x_hi is the return register
  xorl(x_hi, x_hi);
  cmpl(x_lo, y_lo);
  jcc(Assembler::below, low);
  jcc(Assembler::equal, done);

  bind(high);
  xorl(x_hi, x_hi);
  increment(x_hi);
  jmp(done);

  bind(low);
  xorl(x_hi, x_hi);
  decrementl(x_hi);

  bind(done);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  assert(rscratch == noreg, "not needed");

  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t)adr.target(), adr.rspec());
}

void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  //
  // rsp ---> [  ??  ] \               \
  //            ....    | y_rsp_offset |
  //          [ y_lo ] /  (in bytes)   | x_rsp_offset
  //          [ y_hi ]                 | (in bytes)
  //            ....                   |
  //          [ x_lo ]                 /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);
  orl(rbx, rcx);                                 // rbx = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);                   // if rbx = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);                                    // x_hi * y_lo
  movl(rbx, rax);                                // save lo(x_hi * y_lo) in rbx
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);                                     // x_lo * y_hi
  addl(rbx, rax);                                // add lo(x_lo * y_hi) to rbx
  // 3rd step
  bind(quick);                                   // note: rbx = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);                                    // x_lo * y_lo
  addl(rdx, rbx);                                // correct hi(x_lo * y_lo)
}

void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);
  negl(hi);
}

void MacroAssembler::lshl(Register hi, Register lo) {
  // Java shift left long support (semantics as described in JVM spec., p.305)
  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  // shift value is in rcx !
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(hi, lo);                                  // x := x << n
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
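  // (the hardware masks 32-bit shift counts to the low 5 bits, i.e. the count in rcx is used mod 32)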
  bind(L);                                       // s (mod n) < n
  shldl(hi, lo);                                 // x := x << s
  shll(lo);
}

void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(lo, hi);                                  // x := x >> n
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shrdl(lo, hi);                                 // x := x >> s
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  movl(as_Address(dst, noreg), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src, noreg));
}

void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  movl(dst, src);
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax,
                             int eip, char* msg) {
  // In order to get locks to work, we need to fake an in_VM state
  JavaThread* thread = JavaThread::current();
  JavaThreadState saved_state = thread->thread_state();
  thread->set_thread_state(_thread_in_vm);
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
  if ((WizardMode || Verbose) && PrintMiscellaneous) {
    tty->cr();
    findpc(eip);
    tty->cr();
  }
#endif
#define PRINT_REG(rax) \
  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
  PRINT_REG(rax);
  PRINT_REG(rbx);
  PRINT_REG(rcx);
  PRINT_REG(rdx);
  PRINT_REG(rdi);
  PRINT_REG(rsi);
  PRINT_REG(rbp);
  PRINT_REG(rsp);
#undef PRINT_REG
  // Print some words near top of stack.
  int* dump_sp = (int*) rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 16; row++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 8; col++) {
      tty->print(" 0x%08x", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)eip-64, (address)eip);
  tty->print_cr("--------");
  Disassembler::decode((address)eip, (address)eip+32);
}

void MacroAssembler::stop(const char* msg) {
  // push address of message
  ExternalAddress message((address)msg);
  pushptr(message.addr(), noreg);
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  // push address of message
  ExternalAddress message((address)msg);
  pushptr(message.addr(), noreg);

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize);       // discard argument
  pop_CPU_state();
}

void MacroAssembler::print_state() {
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers

  push_CPU_state();
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
  pop_CPU_state();

  popa();
  addl(rsp, wordSize);
}

#else // _LP64

// 64 bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc());
}

Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  AddressLiteral base = adr.base();
  lea(rscratch, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch, index._index, index._scale, index._disp);
  return array;
}

void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for its register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  subq(rsp, 8);
  call(RuntimeAddress(entry_point));
  addq(rsp, 8);
  jmp(E);

  bind(L);
  call(RuntimeAddress(entry_point));

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif
}

void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {
  assert(!src2.is_lval(), "should use cmpptr");
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    Assembler::cmpq(src1, Address(rscratch, 0));
  }
}

int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271. The function
  // returns the (pc) offset of the idivl instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor   (may not be eax/edx)   -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}

void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}

void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}

void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    incrementq(as_Address(dst));
  } else {
    lea(rscratch, dst);
    incrementq(Address(rscratch, 0));
  }
}

void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}

void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  lea(rscratch, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch;
  jmp(dispatch);
}

void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  lea(rscratch, adr);
  movptr(dst, rscratch);
}

void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
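  // (0xC9 is the one-byte LEAVE instruction: mov rsp, rbp; pop rbp)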
  emit_int8((unsigned char)0xC9); // LEAVE
}

void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(dst, src);
      movq(dst, Address(dst, 0));
    }
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  movq(as_Address(dst, rscratch), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src, dst /*rscratch*/));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  if (is_simm32(src)) {
    movptr(dst, checked_cast<int32_t>(src));
  } else {
    mov64(rscratch, src);
    movq(dst, rscratch);
  }
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  movoop(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  mov_metadata(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  lea(rscratch, src);
  if (src.is_lval()) {
    push(rscratch);
  } else {
    pushq(Address(rscratch, 0));
  }
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  reset_last_Java_frame(r15_thread, clear_fp);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register rscratch) {
  set_last_Java_frame(r15_thread, last_java_sp, last_java_fp, last_java_pc, rscratch);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::stop(const char* msg) {
  if (ShowMessageBoxOnError) {
    address rip = pc();
    pusha(); // get regs on stack
    lea(c_rarg1, InternalAddress(rip));
    movq(c_rarg2, rsp); // pass pointer to regs array
  }
  lea(c_rarg0, ExternalAddress((address) msg));
  andq(rsp, -16); // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, ExternalAddress((address) msg));
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();            // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  PRINT_REG(rsp, (intptr_t)(&regs[16]));
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near the top of the stack.
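  // regs[16] is one past the pusha() save area, i.e. the value rsp had before pusha().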
  int64_t* rsp = &regs[16];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling conventions assure us that each VMregpair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg()) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
             src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}

// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling conventions assure us that each VMregpair is either
  // all really one physical register or adjacent stack slots.
  if (src.is_single_phys_reg()) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if (src.first() != dst.first()) {
        movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}


// A float arg may have to do float reg int reg conversion
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling conventions assure us that each VMregpair is either
  // all really one physical register or adjacent stack slots.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if (src.first() != dst.first()) {
      movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}

// On 64 bit we will store integer like items to the stack as
// 64 bits items (x86_32/64 abi) even though java would only store
// 32bits for a parameter. On 32bit it will simply be 32 bits
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movq(rax, Address(rbp, reg2offset_in(src.first())));
      movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
void MacroAssembler::object_move(OopMap* map,
                                 int oop_handle_offset,
                                 int framesize_in_slots,
                                 VMRegPair src,
                                 VMRegPair dst,
                                 bool is_receiver,
                                 int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if oop is null if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
    lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a null
    cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmpptr(rOop, NULL_WORD);
    lea(rHandle, Address(rsp, offset));
    // conditionally move a null from the handle area where it was just stored
    cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it otherwise it is already in correct reg.
  if (dst.first()->is_stack()) {
    movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}

#endif // _LP64

// Now versions that are common to 32/64 bit

void MacroAssembler::addptr(Register dst, int32_t imm32) {
  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
}

void MacroAssembler::addptr(Register dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addptr(Address dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    addss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    addss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addpd(dst, Address(rscratch, 0));
  }
}

// See 8273459. Function for ensuring 64-byte alignment, intended for stubs only.
// Stub code is generated once and never copied.
// NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
void MacroAssembler::align64() {
  align(64, (uint)(uintptr_t)pc());
}

void MacroAssembler::align32() {
  align(32, (uint)(uintptr_t)pc());
}

void MacroAssembler::align(uint modulus) {
  // 8273459: Ensure alignment is possible with current segment alignment
  assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
  align(modulus, offset());
}

void MacroAssembler::align(uint modulus, uint target) {
  if (target % modulus != 0) {
    nop(modulus - (target % modulus));
  }
}

void MacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}

void MacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void MacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}

void MacroAssembler::pop_d(XMMRegister r) {
  movdbl(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
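  // (the legacy SSE encoding of andpd requires a 16-byte-aligned memory operand)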
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andpd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andps(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
}

#ifdef _LP64
void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    andq(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    andq(dst, Address(rscratch, 0));
  }
}
#endif

void MacroAssembler::atomic_incl(Address counter_addr) {
  lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incl(Address(rscratch, 0));
  }
}

#ifdef _LP64
void MacroAssembler::atomic_incq(Address counter_addr) {
  lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incq(Address(rscratch, 0));
  }
}
#endif

// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-(int)os::vm_page_size())), size);
  subptr(tmp, (int)os::vm_page_size());
  subl(size, (int)os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down including all pages in the shadow zone.
  for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
    // this could be any sized move but this can be a debugging crumb
    // so the bigger the better.
    movptr(Address(tmp, (-i*(int)os::vm_page_size())), size);
  }
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(get_thread(rsi);)

  cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
  jcc(Assembler::below, no_reserved_zone_enabling);

  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
  jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);
  setb(Assembler::notZero, x);
}

// Wouldn't need if AddressLiteral version had new name
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}

void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}

void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
  assert(rscratch != noreg || always_reachable(entry), "missing");

  if (reachable(entry)) {
    Assembler::call_literal(entry.target(), entry.rspec());
  } else {
    lea(rscratch, entry);
    Assembler::call(rscratch);
  }
}

void MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
#ifdef _LP64
  // Needs full 64-bit immediate for later patching.
  mov64(rax, (int64_t)Universe::non_oop_word());
#else
  movptr(rax, (intptr_t)Universe::non_oop_word());
#endif
  call(AddressLiteral(entry, rh));
}

int MacroAssembler::ic_check_size() {
  return LP64_ONLY(14) NOT_LP64(12);
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
  Register data = rax;
  Register temp = LP64_ONLY(rscratch1) NOT_LP64(rbx);

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompressedClassPointers) {
    movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  } else {
    movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
  }

  // if inline cache check fails, then jump to runtime routine
  jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}

void MacroAssembler::emit_static_call_stub() {
  // Static stub relocation also tags the Method* in the code-stream.
  mov_metadata(rbx, (Metadata*) nullptr);  // Method is zapped till fixup time.
  // This is recognized as unresolved by relocs/nativeinst/ic code.
  jump(RuntimeAddress(pc()));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));

  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   int number_of_arguments,
                                   bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   bool check_exceptions) {
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   bool check_exceptions) {

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   Register arg_3,
                                   bool check_exceptions) {
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifdef _LP64
    java_thread = r15_thread;
#else
    java_thread = rdi;
    get_thread(java_thread);
#endif // LP64
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // r12 is the heapbase.
  LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
#endif // ASSERT

  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  NOT_LP64(push(java_thread); number_of_arguments++);
  LP64_ONLY(mov(c_rarg0, r15_thread));

  // set last Java frame before call
  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(java_thread, last_java_sp, rbp, nullptr, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

  // restore the thread (cannot use the pushed argument since arguments
  // may be overwritten by C code generated by an optimizing compiler);
  // however can use the register value directly if it is callee saved.
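  // (on 64-bit java_thread is always r15, which is callee saved, so the LP64_ONLY(true ||) below short-circuits the test)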
  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
    // rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
    guarantee(java_thread != rax, "change this code");
    push(rax);
    { Label L;
      get_thread(rax);
      cmpptr(java_thread, rax);
      jcc(Assembler::equal, L);
      STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
      bind(L);
    }
    pop(rax);
#endif
  } else {
    get_thread(java_thread);
  }
  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(java_thread, true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
#ifndef _LP64
    jump_cc(Assembler::notEqual,
            RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
#endif // LP64
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {

  // Calculating the value for last_Java_sp is somewhat subtle.
  // call_VM does an intermediate call which places a return address on
  // the stack just under the stack pointer as the user finished with it.
  // This allows us to retrieve last_Java_pc from last_Java_sp[-1].
  // On 32bit we then have to push additional args on the stack to accomplish
  // the actual requested call. On 64bit call_VM only can use register args
  // so the only extra space is the return address that call_VM created.
  // This hopefully explains the calculations here.

#ifdef _LP64
  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));
#else
  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
#endif // LP64

  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
}

// Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter.
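// (this statically dispatches to MacroAssembler::call_VM_leaf_base, bypassing the InterpreterMacroAssembler override)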
1687 void MacroAssembler::call_VM_leaf0(address entry_point) { 1688 MacroAssembler::call_VM_leaf_base(entry_point, 0); 1689 } 1690 1691 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 1692 call_VM_leaf_base(entry_point, number_of_arguments); 1693 } 1694 1695 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 1696 pass_arg0(this, arg_0); 1697 call_VM_leaf(entry_point, 1); 1698 } 1699 1700 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1701 1702 LP64_ONLY(assert_different_registers(arg_0, c_rarg1)); 1703 pass_arg1(this, arg_1); 1704 pass_arg0(this, arg_0); 1705 call_VM_leaf(entry_point, 2); 1706 } 1707 1708 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 1709 LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2)); 1710 LP64_ONLY(assert_different_registers(arg_1, c_rarg2)); 1711 pass_arg2(this, arg_2); 1712 pass_arg1(this, arg_1); 1713 pass_arg0(this, arg_0); 1714 call_VM_leaf(entry_point, 3); 1715 } 1716 1717 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 1718 LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3)); 1719 LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3)); 1720 LP64_ONLY(assert_different_registers(arg_2, c_rarg3)); 1721 pass_arg3(this, arg_3); 1722 pass_arg2(this, arg_2); 1723 pass_arg1(this, arg_1); 1724 pass_arg0(this, arg_0); 1725 call_VM_leaf(entry_point, 4); 1726 } 1727 1728 void MacroAssembler::super_call_VM_leaf(address entry_point) { 1729 MacroAssembler::call_VM_leaf_base(entry_point, 0); 1730 } 1731 1732 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 1733 pass_arg0(this, arg_0); 1734 MacroAssembler::call_VM_leaf_base(entry_point, 1); 1735 } 1736 1737 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1738 LP64_ONLY(assert_different_registers(arg_0, c_rarg1)); 1739 pass_arg1(this, arg_1); 1740 pass_arg0(this, arg_0); 1741 MacroAssembler::call_VM_leaf_base(entry_point, 2); 1742 } 1743 1744 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 1745 LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2)); 1746 LP64_ONLY(assert_different_registers(arg_1, c_rarg2)); 1747 pass_arg2(this, arg_2); 1748 pass_arg1(this, arg_1); 1749 pass_arg0(this, arg_0); 1750 MacroAssembler::call_VM_leaf_base(entry_point, 3); 1751 } 1752 1753 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 1754 LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3)); 1755 LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3)); 1756 LP64_ONLY(assert_different_registers(arg_2, c_rarg3)); 1757 pass_arg3(this, arg_3); 1758 pass_arg2(this, arg_2); 1759 pass_arg1(this, arg_1); 1760 pass_arg0(this, arg_0); 1761 MacroAssembler::call_VM_leaf_base(entry_point, 4); 1762 } 1763 1764 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) { 1765 movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset())); 1766 movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD); 1767 verify_oop_msg(oop_result, "broken oop in call_VM_base"); 1768 } 1769 1770 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) { 1771 movptr(metadata_result,
Address(java_thread, JavaThread::vm_result_2_offset())); 1772 movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD); 1773 } 1774 1775 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { 1776 } 1777 1778 void MacroAssembler::check_and_handle_popframe(Register java_thread) { 1779 } 1780 1781 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) { 1782 assert(rscratch != noreg || always_reachable(src1), "missing"); 1783 1784 if (reachable(src1)) { 1785 cmpl(as_Address(src1), imm); 1786 } else { 1787 lea(rscratch, src1); 1788 cmpl(Address(rscratch, 0), imm); 1789 } 1790 } 1791 1792 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) { 1793 assert(!src2.is_lval(), "use cmpptr"); 1794 assert(rscratch != noreg || always_reachable(src2), "missing"); 1795 1796 if (reachable(src2)) { 1797 cmpl(src1, as_Address(src2)); 1798 } else { 1799 lea(rscratch, src2); 1800 cmpl(src1, Address(rscratch, 0)); 1801 } 1802 } 1803 1804 void MacroAssembler::cmp32(Register src1, int32_t imm) { 1805 Assembler::cmpl(src1, imm); 1806 } 1807 1808 void MacroAssembler::cmp32(Register src1, Address src2) { 1809 Assembler::cmpl(src1, src2); 1810 } 1811 1812 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1813 ucomisd(opr1, opr2); 1814 1815 Label L; 1816 if (unordered_is_less) { 1817 movl(dst, -1); 1818 jcc(Assembler::parity, L); 1819 jcc(Assembler::below , L); 1820 movl(dst, 0); 1821 jcc(Assembler::equal , L); 1822 increment(dst); 1823 } else { // unordered is greater 1824 movl(dst, 1); 1825 jcc(Assembler::parity, L); 1826 jcc(Assembler::above , L); 1827 movl(dst, 0); 1828 jcc(Assembler::equal , L); 1829 decrementl(dst); 1830 } 1831 bind(L); 1832 } 1833 1834 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1835 ucomiss(opr1, opr2); 1836 1837 Label L; 1838 if (unordered_is_less) { 1839 movl(dst, -1); 1840 jcc(Assembler::parity, L); 1841 jcc(Assembler::below , L); 1842 movl(dst, 0); 1843 jcc(Assembler::equal , L); 1844 increment(dst); 1845 } else { // unordered is greater 1846 movl(dst, 1); 1847 jcc(Assembler::parity, L); 1848 jcc(Assembler::above , L); 1849 movl(dst, 0); 1850 jcc(Assembler::equal , L); 1851 decrementl(dst); 1852 } 1853 bind(L); 1854 } 1855 1856 1857 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) { 1858 assert(rscratch != noreg || always_reachable(src1), "missing"); 1859 1860 if (reachable(src1)) { 1861 cmpb(as_Address(src1), imm); 1862 } else { 1863 lea(rscratch, src1); 1864 cmpb(Address(rscratch, 0), imm); 1865 } 1866 } 1867 1868 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) { 1869 #ifdef _LP64 1870 assert(rscratch != noreg || always_reachable(src2), "missing"); 1871 1872 if (src2.is_lval()) { 1873 movptr(rscratch, src2); 1874 Assembler::cmpq(src1, rscratch); 1875 } else if (reachable(src2)) { 1876 cmpq(src1, as_Address(src2)); 1877 } else { 1878 lea(rscratch, src2); 1879 Assembler::cmpq(src1, Address(rscratch, 0)); 1880 } 1881 #else 1882 assert(rscratch == noreg, "not needed"); 1883 if (src2.is_lval()) { 1884 cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); 1885 } else { 1886 cmpl(src1, as_Address(src2)); 1887 } 1888 #endif // _LP64 1889 } 1890 1891 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) { 1892 assert(src2.is_lval(), "not a mem-mem compare"); 1893 #ifdef _LP64 1894 // moves src2's 
literal address 1895 movptr(rscratch, src2); 1896 Assembler::cmpq(src1, rscratch); 1897 #else 1898 assert(rscratch == noreg, "not needed"); 1899 cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); 1900 #endif // _LP64 1901 } 1902 1903 void MacroAssembler::cmpoop(Register src1, Register src2) { 1904 cmpptr(src1, src2); 1905 } 1906 1907 void MacroAssembler::cmpoop(Register src1, Address src2) { 1908 cmpptr(src1, src2); 1909 } 1910 1911 #ifdef _LP64 1912 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) { 1913 movoop(rscratch, src2); 1914 cmpptr(src1, rscratch); 1915 } 1916 #endif 1917 1918 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) { 1919 assert(rscratch != noreg || always_reachable(adr), "missing"); 1920 1921 if (reachable(adr)) { 1922 lock(); 1923 cmpxchgptr(reg, as_Address(adr)); 1924 } else { 1925 lea(rscratch, adr); 1926 lock(); 1927 cmpxchgptr(reg, Address(rscratch, 0)); 1928 } 1929 } 1930 1931 void MacroAssembler::cmpxchgptr(Register reg, Address adr) { 1932 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr)); 1933 } 1934 1935 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1936 assert(rscratch != noreg || always_reachable(src), "missing"); 1937 1938 if (reachable(src)) { 1939 Assembler::comisd(dst, as_Address(src)); 1940 } else { 1941 lea(rscratch, src); 1942 Assembler::comisd(dst, Address(rscratch, 0)); 1943 } 1944 } 1945 1946 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 1947 assert(rscratch != noreg || always_reachable(src), "missing"); 1948 1949 if (reachable(src)) { 1950 Assembler::comiss(dst, as_Address(src)); 1951 } else { 1952 lea(rscratch, src); 1953 Assembler::comiss(dst, Address(rscratch, 0)); 1954 } 1955 } 1956 1957 1958 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) { 1959 assert(rscratch != noreg || always_reachable(counter_addr), "missing"); 1960 1961 Condition negated_cond = negate_condition(cond); 1962 Label L; 1963 jcc(negated_cond, L); 1964 pushf(); // Preserve flags 1965 atomic_incl(counter_addr, rscratch); 1966 popf(); 1967 bind(L); 1968 } 1969 1970 int MacroAssembler::corrected_idivl(Register reg) { 1971 // Full implementation of Java idiv and irem; checks for 1972 // special case as described in JVM spec., p.243 & p.271. 1973 // The function returns the (pc) offset of the idivl 1974 // instruction - may be needed for implicit exceptions. 
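// The one problematic operand pair is min_int / -1: the JVM spec requires the
// result to wrap (e.g. in Java, Integer.MIN_VALUE / -1 == Integer.MIN_VALUE
// and Integer.MIN_VALUE % -1 == 0), but the hardware idivl raises #DE for it
// because the mathematically correct quotient, +2^31, is not representable.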
1975 // 1976 // normal case special case 1977 // 1978 // input : rax,: dividend min_int 1979 // reg: divisor (may not be rax,/rdx) -1 1980 // 1981 // output: rax,: quotient (= rax, idiv reg) min_int 1982 // rdx: remainder (= rax, irem reg) 0 1983 assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register"); 1984 const int min_int = 0x80000000; 1985 Label normal_case, special_case; 1986 1987 // check for special case 1988 cmpl(rax, min_int); 1989 jcc(Assembler::notEqual, normal_case); 1990 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0) 1991 cmpl(reg, -1); 1992 jcc(Assembler::equal, special_case); 1993 1994 // handle normal case 1995 bind(normal_case); 1996 cdql(); 1997 int idivl_offset = offset(); 1998 idivl(reg); 1999 2000 // normal and special case exit 2001 bind(special_case); 2002 2003 return idivl_offset; 2004 } 2005 2006 2007 2008 void MacroAssembler::decrementl(Register reg, int value) { 2009 if (value == min_jint) {subl(reg, value) ; return; } 2010 if (value < 0) { incrementl(reg, -value); return; } 2011 if (value == 0) { ; return; } 2012 if (value == 1 && UseIncDec) { decl(reg) ; return; } 2013 /* else */ { subl(reg, value) ; return; } 2014 } 2015 2016 void MacroAssembler::decrementl(Address dst, int value) { 2017 if (value == min_jint) {subl(dst, value) ; return; } 2018 if (value < 0) { incrementl(dst, -value); return; } 2019 if (value == 0) { ; return; } 2020 if (value == 1 && UseIncDec) { decl(dst) ; return; } 2021 /* else */ { subl(dst, value) ; return; } 2022 } 2023 2024 void MacroAssembler::division_with_shift (Register reg, int shift_value) { 2025 assert(shift_value > 0, "illegal shift value"); 2026 Label _is_positive; 2027 testl (reg, reg); 2028 jcc (Assembler::positive, _is_positive); 2029 int offset = (1 << shift_value) - 1 ; 2030 2031 if (offset == 1) { 2032 incrementl(reg); 2033 } else { 2034 addl(reg, offset); 2035 } 2036 2037 bind (_is_positive); 2038 sarl(reg, shift_value); 2039 } 2040 2041 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2042 assert(rscratch != noreg || always_reachable(src), "missing"); 2043 2044 if (reachable(src)) { 2045 Assembler::divsd(dst, as_Address(src)); 2046 } else { 2047 lea(rscratch, src); 2048 Assembler::divsd(dst, Address(rscratch, 0)); 2049 } 2050 } 2051 2052 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2053 assert(rscratch != noreg || always_reachable(src), "missing"); 2054 2055 if (reachable(src)) { 2056 Assembler::divss(dst, as_Address(src)); 2057 } else { 2058 lea(rscratch, src); 2059 Assembler::divss(dst, Address(rscratch, 0)); 2060 } 2061 } 2062 2063 void MacroAssembler::enter() { 2064 push(rbp); 2065 mov(rbp, rsp); 2066 } 2067 2068 void MacroAssembler::post_call_nop() { 2069 if (!Continuations::enabled()) { 2070 return; 2071 } 2072 InstructionMark im(this); 2073 relocate(post_call_nop_Relocation::spec()); 2074 InlineSkippedInstructionsCounter skipCounter(this); 2075 emit_int8((uint8_t)0x0f); 2076 emit_int8((uint8_t)0x1f); 2077 emit_int8((uint8_t)0x84); 2078 emit_int8((uint8_t)0x00); 2079 emit_int32(0x00); 2080 } 2081 2082 // A 5 byte nop that is safe for patching (see patch_verified_entry) 2083 void MacroAssembler::fat_nop() { 2084 if (UseAddressNop) { 2085 addr_nop_5(); 2086 } else { 2087 emit_int8((uint8_t)0x26); // es: 2088 emit_int8((uint8_t)0x2e); // cs: 2089 emit_int8((uint8_t)0x64); // fs: 2090 emit_int8((uint8_t)0x65); // gs: 2091 emit_int8((uint8_t)0x90); 2092 } 2093 } 2094 2095 #ifndef _LP64 2096 void 
MacroAssembler::fcmp(Register tmp) { 2097 fcmp(tmp, 1, true, true); 2098 } 2099 2100 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) { 2101 assert(!pop_right || pop_left, "usage error"); 2102 if (VM_Version::supports_cmov()) { 2103 assert(tmp == noreg, "unneeded temp"); 2104 if (pop_left) { 2105 fucomip(index); 2106 } else { 2107 fucomi(index); 2108 } 2109 if (pop_right) { 2110 fpop(); 2111 } 2112 } else { 2113 assert(tmp != noreg, "need temp"); 2114 if (pop_left) { 2115 if (pop_right) { 2116 fcompp(); 2117 } else { 2118 fcomp(index); 2119 } 2120 } else { 2121 fcom(index); 2122 } 2123 // convert FPU condition into eflags condition via rax, 2124 save_rax(tmp); 2125 fwait(); fnstsw_ax(); 2126 sahf(); 2127 restore_rax(tmp); 2128 } 2129 // condition codes set as follows: 2130 // 2131 // CF (corresponds to C0) if x < y 2132 // PF (corresponds to C2) if unordered 2133 // ZF (corresponds to C3) if x = y 2134 } 2135 2136 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) { 2137 fcmp2int(dst, unordered_is_less, 1, true, true); 2138 } 2139 2140 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) { 2141 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right); 2142 Label L; 2143 if (unordered_is_less) { 2144 movl(dst, -1); 2145 jcc(Assembler::parity, L); 2146 jcc(Assembler::below , L); 2147 movl(dst, 0); 2148 jcc(Assembler::equal , L); 2149 increment(dst); 2150 } else { // unordered is greater 2151 movl(dst, 1); 2152 jcc(Assembler::parity, L); 2153 jcc(Assembler::above , L); 2154 movl(dst, 0); 2155 jcc(Assembler::equal , L); 2156 decrementl(dst); 2157 } 2158 bind(L); 2159 } 2160 2161 void MacroAssembler::fld_d(AddressLiteral src) { 2162 fld_d(as_Address(src)); 2163 } 2164 2165 void MacroAssembler::fld_s(AddressLiteral src) { 2166 fld_s(as_Address(src)); 2167 } 2168 2169 void MacroAssembler::fldcw(AddressLiteral src) { 2170 fldcw(as_Address(src)); 2171 } 2172 2173 void MacroAssembler::fpop() { 2174 ffree(); 2175 fincstp(); 2176 } 2177 2178 void MacroAssembler::fremr(Register tmp) { 2179 save_rax(tmp); 2180 { Label L; 2181 bind(L); 2182 fprem(); 2183 fwait(); fnstsw_ax(); 2184 sahf(); 2185 jcc(Assembler::parity, L); 2186 } 2187 restore_rax(tmp); 2188 // Result is in ST0. 
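// (The parity test works because fnstsw/sahf copy the x87 C2 flag into PF:
//  fprem performs only a partial reduction per iteration and keeps C2 set
//  until the reduction is complete, so the loop spins while PF is set.)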
2189 // Note: fxch & fpop to get rid of ST1 2190 // (otherwise FPU stack could overflow eventually) 2191 fxch(1); 2192 fpop(); 2193 } 2194 2195 void MacroAssembler::empty_FPU_stack() { 2196 if (VM_Version::supports_mmx()) { 2197 emms(); 2198 } else { 2199 for (int i = 8; i-- > 0; ) ffree(i); 2200 } 2201 } 2202 #endif // !LP64 2203 2204 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2205 assert(rscratch != noreg || always_reachable(src), "missing"); 2206 if (reachable(src)) { 2207 Assembler::mulpd(dst, as_Address(src)); 2208 } else { 2209 lea(rscratch, src); 2210 Assembler::mulpd(dst, Address(rscratch, 0)); 2211 } 2212 } 2213 2214 void MacroAssembler::load_float(Address src) { 2215 #ifdef _LP64 2216 movflt(xmm0, src); 2217 #else 2218 if (UseSSE >= 1) { 2219 movflt(xmm0, src); 2220 } else { 2221 fld_s(src); 2222 } 2223 #endif // LP64 2224 } 2225 2226 void MacroAssembler::store_float(Address dst) { 2227 #ifdef _LP64 2228 movflt(dst, xmm0); 2229 #else 2230 if (UseSSE >= 1) { 2231 movflt(dst, xmm0); 2232 } else { 2233 fstp_s(dst); 2234 } 2235 #endif // LP64 2236 } 2237 2238 void MacroAssembler::load_double(Address src) { 2239 #ifdef _LP64 2240 movdbl(xmm0, src); 2241 #else 2242 if (UseSSE >= 2) { 2243 movdbl(xmm0, src); 2244 } else { 2245 fld_d(src); 2246 } 2247 #endif // LP64 2248 } 2249 2250 void MacroAssembler::store_double(Address dst) { 2251 #ifdef _LP64 2252 movdbl(dst, xmm0); 2253 #else 2254 if (UseSSE >= 2) { 2255 movdbl(dst, xmm0); 2256 } else { 2257 fstp_d(dst); 2258 } 2259 #endif // LP64 2260 } 2261 2262 // dst = c = a * b + c 2263 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 2264 Assembler::vfmadd231sd(c, a, b); 2265 if (dst != c) { 2266 movdbl(dst, c); 2267 } 2268 } 2269 2270 // dst = c = a * b + c 2271 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 2272 Assembler::vfmadd231ss(c, a, b); 2273 if (dst != c) { 2274 movflt(dst, c); 2275 } 2276 } 2277 2278 // dst = c = a * b + c 2279 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 2280 Assembler::vfmadd231pd(c, a, b, vector_len); 2281 if (dst != c) { 2282 vmovdqu(dst, c); 2283 } 2284 } 2285 2286 // dst = c = a * b + c 2287 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 2288 Assembler::vfmadd231ps(c, a, b, vector_len); 2289 if (dst != c) { 2290 vmovdqu(dst, c); 2291 } 2292 } 2293 2294 // dst = c = a * b + c 2295 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 2296 Assembler::vfmadd231pd(c, a, b, vector_len); 2297 if (dst != c) { 2298 vmovdqu(dst, c); 2299 } 2300 } 2301 2302 // dst = c = a * b + c 2303 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 2304 Assembler::vfmadd231ps(c, a, b, vector_len); 2305 if (dst != c) { 2306 vmovdqu(dst, c); 2307 } 2308 } 2309 2310 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) { 2311 assert(rscratch != noreg || always_reachable(dst), "missing"); 2312 2313 if (reachable(dst)) { 2314 incrementl(as_Address(dst)); 2315 } else { 2316 lea(rscratch, dst); 2317 incrementl(Address(rscratch, 0)); 2318 } 2319 } 2320 2321 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) { 2322 incrementl(as_Address(dst, rscratch)); 2323 } 2324 2325 void MacroAssembler::incrementl(Register reg, int value) { 2326 if (value == min_jint) 
{addl(reg, value) ; return; } 2327 if (value < 0) { decrementl(reg, -value); return; } 2328 if (value == 0) { ; return; } 2329 if (value == 1 && UseIncDec) { incl(reg) ; return; } 2330 /* else */ { addl(reg, value) ; return; } 2331 } 2332 2333 void MacroAssembler::incrementl(Address dst, int value) { 2334 if (value == min_jint) {addl(dst, value) ; return; } 2335 if (value < 0) { decrementl(dst, -value); return; } 2336 if (value == 0) { ; return; } 2337 if (value == 1 && UseIncDec) { incl(dst) ; return; } 2338 /* else */ { addl(dst, value) ; return; } 2339 } 2340 2341 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) { 2342 assert(rscratch != noreg || always_reachable(dst), "missing"); 2343 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump"); 2344 if (reachable(dst)) { 2345 jmp_literal(dst.target(), dst.rspec()); 2346 } else { 2347 lea(rscratch, dst); 2348 jmp(rscratch); 2349 } 2350 } 2351 2352 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) { 2353 assert(rscratch != noreg || always_reachable(dst), "missing"); 2354 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump_cc"); 2355 if (reachable(dst)) { 2356 InstructionMark im(this); 2357 relocate(dst.reloc()); 2358 const int short_size = 2; 2359 const int long_size = 6; 2360 int offs = (intptr_t)dst.target() - ((intptr_t)pc()); 2361 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) { 2362 // 0111 tttn #8-bit disp 2363 emit_int8(0x70 | cc); 2364 emit_int8((offs - short_size) & 0xFF); 2365 } else { 2366 // 0000 1111 1000 tttn #32-bit disp 2367 emit_int8(0x0F); 2368 emit_int8((unsigned char)(0x80 | cc)); 2369 emit_int32(offs - long_size); 2370 } 2371 } else { 2372 #ifdef ASSERT 2373 warning("reversing conditional branch"); 2374 #endif /* ASSERT */ 2375 Label skip; 2376 jccb(reverse[cc], skip); 2377 lea(rscratch, dst); 2378 Assembler::jmp(rscratch); 2379 bind(skip); 2380 } 2381 } 2382 2383 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) { 2384 assert(rscratch != noreg || always_reachable(src), "missing"); 2385 2386 if (reachable(src)) { 2387 Assembler::ldmxcsr(as_Address(src)); 2388 } else { 2389 lea(rscratch, src); 2390 Assembler::ldmxcsr(Address(rscratch, 0)); 2391 } 2392 } 2393 2394 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2395 int off; 2396 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 2397 off = offset(); 2398 movsbl(dst, src); // movsxb 2399 } else { 2400 off = load_unsigned_byte(dst, src); 2401 shll(dst, 24); 2402 sarl(dst, 24); 2403 } 2404 return off; 2405 } 2406 2407 // Note: load_signed_short used to be called load_signed_word. 2408 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler 2409 // manual, which means 16 bits, that usage is found nowhere in HotSpot code. 2410 // The term "word" in HotSpot means a 32- or 64-bit machine word. 2411 int MacroAssembler::load_signed_short(Register dst, Address src) { 2412 int off; 2413 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 2414 // This is dubious to me since it seems safe to do a signed 16 => 64 bit 2415 // version but this is what 64bit has always done. This seems to imply 2416 // that users are only using 32bits worth. 
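// (movswl sign-extends 16 to 32 bits; on 64bit the 32-bit destination write
//  then zero-extends into the upper half, so the value is not sign-extended
//  all the way to 64 bits, which is the limitation noted above.)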
2417 off = offset(); 2418 movswl(dst, src); // movsxw 2419 } else { 2420 off = load_unsigned_short(dst, src); 2421 shll(dst, 16); 2422 sarl(dst, 16); 2423 } 2424 return off; 2425 } 2426 2427 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2428 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 2429 // and "3.9 Partial Register Penalties", p. 22. 2430 int off; 2431 if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) { 2432 off = offset(); 2433 movzbl(dst, src); // movzxb 2434 } else { 2435 xorl(dst, dst); 2436 off = offset(); 2437 movb(dst, src); 2438 } 2439 return off; 2440 } 2441 2442 // Note: load_unsigned_short used to be called load_unsigned_word. 2443 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2444 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 2445 // and "3.9 Partial Register Penalties", p. 22. 2446 int off; 2447 if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) { 2448 off = offset(); 2449 movzwl(dst, src); // movzxw 2450 } else { 2451 xorl(dst, dst); 2452 off = offset(); 2453 movw(dst, src); 2454 } 2455 return off; 2456 } 2457 2458 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { 2459 switch (size_in_bytes) { 2460 #ifndef _LP64 2461 case 8: 2462 assert(dst2 != noreg, "second dest register required"); 2463 movl(dst, src); 2464 movl(dst2, src.plus_disp(BytesPerInt)); 2465 break; 2466 #else 2467 case 8: movq(dst, src); break; 2468 #endif 2469 case 4: movl(dst, src); break; 2470 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2471 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2472 default: ShouldNotReachHere(); 2473 } 2474 } 2475 2476 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { 2477 switch (size_in_bytes) { 2478 #ifndef _LP64 2479 case 8: 2480 assert(src2 != noreg, "second source register required"); 2481 movl(dst, src); 2482 movl(dst.plus_disp(BytesPerInt), src2); 2483 break; 2484 #else 2485 case 8: movq(dst, src); break; 2486 #endif 2487 case 4: movl(dst, src); break; 2488 case 2: movw(dst, src); break; 2489 case 1: movb(dst, src); break; 2490 default: ShouldNotReachHere(); 2491 } 2492 } 2493 2494 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) { 2495 assert(rscratch != noreg || always_reachable(dst), "missing"); 2496 2497 if (reachable(dst)) { 2498 movl(as_Address(dst), src); 2499 } else { 2500 lea(rscratch, dst); 2501 movl(Address(rscratch, 0), src); 2502 } 2503 } 2504 2505 void MacroAssembler::mov32(Register dst, AddressLiteral src) { 2506 if (reachable(src)) { 2507 movl(dst, as_Address(src)); 2508 } else { 2509 lea(dst, src); 2510 movl(dst, Address(dst, 0)); 2511 } 2512 } 2513 2514 // C++ bool manipulation 2515 2516 void MacroAssembler::movbool(Register dst, Address src) { 2517 if(sizeof(bool) == 1) 2518 movb(dst, src); 2519 else if(sizeof(bool) == 2) 2520 movw(dst, src); 2521 else if(sizeof(bool) == 4) 2522 movl(dst, src); 2523 else 2524 // unsupported 2525 ShouldNotReachHere(); 2526 } 2527 2528 void MacroAssembler::movbool(Address dst, bool boolconst) { 2529 if(sizeof(bool) == 1) 2530 movb(dst, (int) boolconst); 2531 else if(sizeof(bool) == 2) 2532 movw(dst, (int) boolconst); 2533 else if(sizeof(bool) == 4) 2534 movl(dst, (int) boolconst); 2535 else 2536 // unsupported 2537 ShouldNotReachHere(); 2538 } 2539 2540 void
MacroAssembler::movbool(Address dst, Register src) { 2541 if(sizeof(bool) == 1) 2542 movb(dst, src); 2543 else if(sizeof(bool) == 2) 2544 movw(dst, src); 2545 else if(sizeof(bool) == 4) 2546 movl(dst, src); 2547 else 2548 // unsupported 2549 ShouldNotReachHere(); 2550 } 2551 2552 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) { 2553 assert(rscratch != noreg || always_reachable(src), "missing"); 2554 2555 if (reachable(src)) { 2556 movdl(dst, as_Address(src)); 2557 } else { 2558 lea(rscratch, src); 2559 movdl(dst, Address(rscratch, 0)); 2560 } 2561 } 2562 2563 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) { 2564 assert(rscratch != noreg || always_reachable(src), "missing"); 2565 2566 if (reachable(src)) { 2567 movq(dst, as_Address(src)); 2568 } else { 2569 lea(rscratch, src); 2570 movq(dst, Address(rscratch, 0)); 2571 } 2572 } 2573 2574 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) { 2575 assert(rscratch != noreg || always_reachable(src), "missing"); 2576 2577 if (reachable(src)) { 2578 if (UseXmmLoadAndClearUpper) { 2579 movsd (dst, as_Address(src)); 2580 } else { 2581 movlpd(dst, as_Address(src)); 2582 } 2583 } else { 2584 lea(rscratch, src); 2585 if (UseXmmLoadAndClearUpper) { 2586 movsd (dst, Address(rscratch, 0)); 2587 } else { 2588 movlpd(dst, Address(rscratch, 0)); 2589 } 2590 } 2591 } 2592 2593 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) { 2594 assert(rscratch != noreg || always_reachable(src), "missing"); 2595 2596 if (reachable(src)) { 2597 movss(dst, as_Address(src)); 2598 } else { 2599 lea(rscratch, src); 2600 movss(dst, Address(rscratch, 0)); 2601 } 2602 } 2603 2604 void MacroAssembler::movptr(Register dst, Register src) { 2605 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2606 } 2607 2608 void MacroAssembler::movptr(Register dst, Address src) { 2609 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2610 } 2611 2612 // src should NEVER be a real pointer. 
Use AddressLiteral for true pointers 2613 void MacroAssembler::movptr(Register dst, intptr_t src) { 2614 #ifdef _LP64 2615 if (is_uimm32(src)) { 2616 movl(dst, checked_cast<uint32_t>(src)); 2617 } else if (is_simm32(src)) { 2618 movq(dst, checked_cast<int32_t>(src)); 2619 } else { 2620 mov64(dst, src); 2621 } 2622 #else 2623 movl(dst, src); 2624 #endif 2625 } 2626 2627 void MacroAssembler::movptr(Address dst, Register src) { 2628 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2629 } 2630 2631 void MacroAssembler::movptr(Address dst, int32_t src) { 2632 LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); 2633 } 2634 2635 void MacroAssembler::movdqu(Address dst, XMMRegister src) { 2636 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2637 Assembler::movdqu(dst, src); 2638 } 2639 2640 void MacroAssembler::movdqu(XMMRegister dst, Address src) { 2641 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2642 Assembler::movdqu(dst, src); 2643 } 2644 2645 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) { 2646 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2647 Assembler::movdqu(dst, src); 2648 } 2649 2650 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2651 assert(rscratch != noreg || always_reachable(src), "missing"); 2652 2653 if (reachable(src)) { 2654 movdqu(dst, as_Address(src)); 2655 } else { 2656 lea(rscratch, src); 2657 movdqu(dst, Address(rscratch, 0)); 2658 } 2659 } 2660 2661 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) { 2662 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2663 Assembler::vmovdqu(dst, src); 2664 } 2665 2666 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) { 2667 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2668 Assembler::vmovdqu(dst, src); 2669 } 2670 2671 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2672 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2673 Assembler::vmovdqu(dst, src); 2674 } 2675 2676 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2677 assert(rscratch != noreg || always_reachable(src), "missing"); 2678 2679 if (reachable(src)) { 2680 vmovdqu(dst, as_Address(src)); 2681 } 2682 else { 2683 lea(rscratch, src); 2684 vmovdqu(dst, Address(rscratch, 0)); 2685 } 2686 } 2687 2688 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2689 assert(rscratch != noreg || always_reachable(src), "missing"); 2690 2691 if (vector_len == AVX_512bit) { 2692 evmovdquq(dst, src, AVX_512bit, rscratch); 2693 } else if (vector_len == AVX_256bit) { 2694 vmovdqu(dst, src, rscratch); 2695 } else { 2696 movdqu(dst, src, rscratch); 2697 } 2698 } 2699 2700 void MacroAssembler::kmov(KRegister dst, Address src) { 2701 if (VM_Version::supports_avx512bw()) { 2702 kmovql(dst, src); 2703 } else { 2704 assert(VM_Version::supports_evex(), ""); 2705 kmovwl(dst, src); 2706 } 2707 } 2708 2709 void MacroAssembler::kmov(Address dst, KRegister src) { 2710 if (VM_Version::supports_avx512bw()) { 2711 kmovql(dst, src); 2712 } else { 2713 assert(VM_Version::supports_evex(), ""); 2714 kmovwl(dst, src); 2715 } 2716 } 2717 2718 void MacroAssembler::kmov(KRegister dst, 
KRegister src) { 2719 if (VM_Version::supports_avx512bw()) { 2720 kmovql(dst, src); 2721 } else { 2722 assert(VM_Version::supports_evex(), ""); 2723 kmovwl(dst, src); 2724 } 2725 } 2726 2727 void MacroAssembler::kmov(Register dst, KRegister src) { 2728 if (VM_Version::supports_avx512bw()) { 2729 kmovql(dst, src); 2730 } else { 2731 assert(VM_Version::supports_evex(), ""); 2732 kmovwl(dst, src); 2733 } 2734 } 2735 2736 void MacroAssembler::kmov(KRegister dst, Register src) { 2737 if (VM_Version::supports_avx512bw()) { 2738 kmovql(dst, src); 2739 } else { 2740 assert(VM_Version::supports_evex(), ""); 2741 kmovwl(dst, src); 2742 } 2743 } 2744 2745 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) { 2746 assert(rscratch != noreg || always_reachable(src), "missing"); 2747 2748 if (reachable(src)) { 2749 kmovql(dst, as_Address(src)); 2750 } else { 2751 lea(rscratch, src); 2752 kmovql(dst, Address(rscratch, 0)); 2753 } 2754 } 2755 2756 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) { 2757 assert(rscratch != noreg || always_reachable(src), "missing"); 2758 2759 if (reachable(src)) { 2760 kmovwl(dst, as_Address(src)); 2761 } else { 2762 lea(rscratch, src); 2763 kmovwl(dst, Address(rscratch, 0)); 2764 } 2765 } 2766 2767 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2768 int vector_len, Register rscratch) { 2769 assert(rscratch != noreg || always_reachable(src), "missing"); 2770 2771 if (reachable(src)) { 2772 Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len); 2773 } else { 2774 lea(rscratch, src); 2775 Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len); 2776 } 2777 } 2778 2779 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2780 int vector_len, Register rscratch) { 2781 assert(rscratch != noreg || always_reachable(src), "missing"); 2782 2783 if (reachable(src)) { 2784 Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len); 2785 } else { 2786 lea(rscratch, src); 2787 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len); 2788 } 2789 } 2790 2791 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2792 assert(rscratch != noreg || always_reachable(src), "missing"); 2793 2794 if (reachable(src)) { 2795 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len); 2796 } else { 2797 lea(rscratch, src); 2798 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len); 2799 } 2800 } 2801 2802 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2803 assert(rscratch != noreg || always_reachable(src), "missing"); 2804 2805 if (reachable(src)) { 2806 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len); 2807 } else { 2808 lea(rscratch, src); 2809 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len); 2810 } 2811 } 2812 2813 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2814 assert(rscratch != noreg || always_reachable(src), "missing"); 2815 2816 if (reachable(src)) { 2817 Assembler::evmovdquq(dst, as_Address(src), vector_len); 2818 } else { 2819 lea(rscratch, src); 2820 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len); 2821 } 2822 } 2823 2824 void MacroAssembler::movdqa(XMMRegister dst, 
AddressLiteral src, Register rscratch) { 2825 assert(rscratch != noreg || always_reachable(src), "missing"); 2826 2827 if (reachable(src)) { 2828 Assembler::movdqa(dst, as_Address(src)); 2829 } else { 2830 lea(rscratch, src); 2831 Assembler::movdqa(dst, Address(rscratch, 0)); 2832 } 2833 } 2834 2835 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2836 assert(rscratch != noreg || always_reachable(src), "missing"); 2837 2838 if (reachable(src)) { 2839 Assembler::movsd(dst, as_Address(src)); 2840 } else { 2841 lea(rscratch, src); 2842 Assembler::movsd(dst, Address(rscratch, 0)); 2843 } 2844 } 2845 2846 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2847 assert(rscratch != noreg || always_reachable(src), "missing"); 2848 2849 if (reachable(src)) { 2850 Assembler::movss(dst, as_Address(src)); 2851 } else { 2852 lea(rscratch, src); 2853 Assembler::movss(dst, Address(rscratch, 0)); 2854 } 2855 } 2856 2857 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) { 2858 assert(rscratch != noreg || always_reachable(src), "missing"); 2859 2860 if (reachable(src)) { 2861 Assembler::movddup(dst, as_Address(src)); 2862 } else { 2863 lea(rscratch, src); 2864 Assembler::movddup(dst, Address(rscratch, 0)); 2865 } 2866 } 2867 2868 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2869 assert(rscratch != noreg || always_reachable(src), "missing"); 2870 2871 if (reachable(src)) { 2872 Assembler::vmovddup(dst, as_Address(src), vector_len); 2873 } else { 2874 lea(rscratch, src); 2875 Assembler::vmovddup(dst, Address(rscratch, 0), vector_len); 2876 } 2877 } 2878 2879 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2880 assert(rscratch != noreg || always_reachable(src), "missing"); 2881 2882 if (reachable(src)) { 2883 Assembler::mulsd(dst, as_Address(src)); 2884 } else { 2885 lea(rscratch, src); 2886 Assembler::mulsd(dst, Address(rscratch, 0)); 2887 } 2888 } 2889 2890 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2891 assert(rscratch != noreg || always_reachable(src), "missing"); 2892 2893 if (reachable(src)) { 2894 Assembler::mulss(dst, as_Address(src)); 2895 } else { 2896 lea(rscratch, src); 2897 Assembler::mulss(dst, Address(rscratch, 0)); 2898 } 2899 } 2900 2901 void MacroAssembler::null_check(Register reg, int offset) { 2902 if (needs_explicit_null_check(offset)) { 2903 // provoke OS null exception if reg is null by 2904 // accessing M[reg] w/o changing any (non-CC) registers 2905 // NOTE: cmpl is plenty here to provoke a segv 2906 cmpptr(rax, Address(reg, 0)); 2907 // Note: should probably use testl(rax, Address(reg, 0)); 2908 // may be shorter code (however, this version of 2909 // testl needs to be implemented first) 2910 } else { 2911 // nothing to do, (later) access of M[reg + offset] 2912 // will provoke OS null exception if reg is null 2913 } 2914 } 2915 2916 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) { 2917 andptr(markword, markWord::inline_type_mask_in_place); 2918 cmpptr(markword, markWord::inline_type_pattern); 2919 jcc(Assembler::equal, is_inline_type); 2920 } 2921 2922 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) { 2923 movl(temp_reg, Address(klass, Klass::access_flags_offset())); 2924 testl(temp_reg, JVM_ACC_IDENTITY); 2925 jcc(Assembler::zero, 
is_inline_type); 2926 } 2927 2928 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) { 2929 testptr(object, object); 2930 jcc(Assembler::zero, not_inline_type); 2931 const int is_inline_type_mask = markWord::inline_type_pattern; 2932 movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes())); 2933 andptr(tmp, is_inline_type_mask); 2934 cmpptr(tmp, is_inline_type_mask); 2935 jcc(Assembler::notEqual, not_inline_type); 2936 } 2937 2938 void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) { 2939 #ifdef ASSERT 2940 { 2941 Label done_check; 2942 test_klass_is_inline_type(klass, temp_reg, done_check); 2943 stop("test_klass_is_empty_inline_type with non inline type klass"); 2944 bind(done_check); 2945 } 2946 #endif 2947 movl(temp_reg, Address(klass, InstanceKlass::misc_flags_offset())); 2948 testl(temp_reg, InstanceKlassFlags::is_empty_inline_type_value()); 2949 jcc(Assembler::notZero, is_empty_inline_type); 2950 } 2951 2952 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) { 2953 movl(temp_reg, flags); 2954 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift); 2955 jcc(Assembler::notEqual, is_null_free_inline_type); 2956 } 2957 2958 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) { 2959 movl(temp_reg, flags); 2960 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift); 2961 jcc(Assembler::equal, not_null_free_inline_type); 2962 } 2963 2964 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) { 2965 movl(temp_reg, flags); 2966 testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift); 2967 jcc(Assembler::notEqual, is_flat); 2968 } 2969 2970 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) { 2971 movl(temp_reg, flags); 2972 testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift); 2973 jcc(Assembler::notEqual, has_null_marker); 2974 } 2975 2976 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) { 2977 Label test_mark_word; 2978 // load mark word 2979 movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes())); 2980 // check displaced 2981 testl(temp_reg, markWord::unlocked_value); 2982 jccb(Assembler::notZero, test_mark_word); 2983 // slow path use klass prototype 2984 push(rscratch1); 2985 load_prototype_header(temp_reg, oop, rscratch1); 2986 pop(rscratch1); 2987 2988 bind(test_mark_word); 2989 testl(temp_reg, test_bit); 2990 jcc((jmp_set) ? 
Assembler::notZero : Assembler::zero, jmp_label); 2991 } 2992 2993 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg, 2994 Label& is_flat_array) { 2995 #ifdef _LP64 2996 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array); 2997 #else 2998 load_klass(temp_reg, oop, noreg); 2999 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset())); 3000 test_flat_array_layout(temp_reg, is_flat_array); 3001 #endif 3002 } 3003 3004 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg, 3005 Label& is_non_flat_array) { 3006 #ifdef _LP64 3007 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array); 3008 #else 3009 load_klass(temp_reg, oop, noreg); 3010 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset())); 3011 test_non_flat_array_layout(temp_reg, is_non_flat_array); 3012 #endif 3013 } 3014 3015 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) { 3016 #ifdef _LP64 3017 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array); 3018 #else 3019 Unimplemented(); 3020 #endif 3021 } 3022 3023 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) { 3024 #ifdef _LP64 3025 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array); 3026 #else 3027 Unimplemented(); 3028 #endif 3029 } 3030 3031 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) { 3032 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace); 3033 jcc(Assembler::notZero, is_flat_array); 3034 } 3035 3036 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) { 3037 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace); 3038 jcc(Assembler::zero, is_non_flat_array); 3039 } 3040 3041 void MacroAssembler::os_breakpoint() { 3042 // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability 3043 // (e.g., MSVC can't call ps() otherwise) 3044 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); 3045 } 3046 3047 void MacroAssembler::unimplemented(const char* what) { 3048 const char* buf = nullptr; 3049 { 3050 ResourceMark rm; 3051 stringStream ss; 3052 ss.print("unimplemented: %s", what); 3053 buf = code_string(ss.as_string()); 3054 } 3055 stop(buf); 3056 } 3057 3058 #ifdef _LP64 3059 #define XSTATE_BV 0x200 3060 #endif 3061 3062 void MacroAssembler::pop_CPU_state() { 3063 pop_FPU_state(); 3064 pop_IU_state(); 3065 } 3066 3067 void MacroAssembler::pop_FPU_state() { 3068 #ifndef _LP64 3069 frstor(Address(rsp, 0)); 3070 #else 3071 fxrstor(Address(rsp, 0)); 3072 #endif 3073 addptr(rsp, FPUStateSizeInWords * wordSize); 3074 } 3075 3076 void MacroAssembler::pop_IU_state() { 3077 popa(); 3078 LP64_ONLY(addq(rsp, 8)); 3079 popf(); 3080 } 3081 3082 // Save Integer and Float state 3083 // Warning: Stack must be 16 byte aligned (64bit) 3084 void MacroAssembler::push_CPU_state() { 3085 push_IU_state(); 3086 push_FPU_state(); 3087 } 3088 3089 void MacroAssembler::push_FPU_state() { 3090 subptr(rsp, FPUStateSizeInWords * wordSize); 3091 #ifndef _LP64 3092 fnsave(Address(rsp, 0)); 3093 fwait(); 3094 #else 3095 fxsave(Address(rsp, 0)); 3096 #endif // LP64 3097 } 3098 3099 void MacroAssembler::push_IU_state() { 3100 // Push flags first because pusha kills them 3101 pushf(); 3102 // Make sure rsp stays 16-byte aligned 3103
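// (pushf above pushed 8 bytes, leaving rsp at 8 mod 16; the extra 8-byte
//  adjustment below restores 16-byte alignment, and pusha then pushes an
//  even number of 8-byte slots, preserving the alignment that the fxsave
//  in push_FPU_state relies on.)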
LP64_ONLY(subq(rsp, 8)); 3104 pusha(); 3105 } 3106 3107 void MacroAssembler::push_cont_fastpath() { 3108 if (!Continuations::enabled()) return; 3109 3110 #ifndef _LP64 3111 Register rthread = rax; 3112 Register rrealsp = rbx; 3113 push(rthread); 3114 push(rrealsp); 3115 3116 get_thread(rthread); 3117 3118 // The code below wants the original RSP. 3119 // Move it back after the pushes above. 3120 movptr(rrealsp, rsp); 3121 addptr(rrealsp, 2*wordSize); 3122 #else 3123 Register rthread = r15_thread; 3124 Register rrealsp = rsp; 3125 #endif 3126 3127 Label done; 3128 cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset())); 3129 jccb(Assembler::belowEqual, done); 3130 movptr(Address(rthread, JavaThread::cont_fastpath_offset()), rrealsp); 3131 bind(done); 3132 3133 #ifndef _LP64 3134 pop(rrealsp); 3135 pop(rthread); 3136 #endif 3137 } 3138 3139 void MacroAssembler::pop_cont_fastpath() { 3140 if (!Continuations::enabled()) return; 3141 3142 #ifndef _LP64 3143 Register rthread = rax; 3144 Register rrealsp = rbx; 3145 push(rthread); 3146 push(rrealsp); 3147 3148 get_thread(rthread); 3149 3150 // The code below wants the original RSP. 3151 // Move it back after the pushes above. 3152 movptr(rrealsp, rsp); 3153 addptr(rrealsp, 2*wordSize); 3154 #else 3155 Register rthread = r15_thread; 3156 Register rrealsp = rsp; 3157 #endif 3158 3159 Label done; 3160 cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset())); 3161 jccb(Assembler::below, done); 3162 movptr(Address(rthread, JavaThread::cont_fastpath_offset()), 0); 3163 bind(done); 3164 3165 #ifndef _LP64 3166 pop(rrealsp); 3167 pop(rthread); 3168 #endif 3169 } 3170 3171 void MacroAssembler::inc_held_monitor_count() { 3172 #ifndef _LP64 3173 Register thread = rax; 3174 push(thread); 3175 get_thread(thread); 3176 incrementl(Address(thread, JavaThread::held_monitor_count_offset())); 3177 pop(thread); 3178 #else // LP64 3179 incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 3180 #endif 3181 } 3182 3183 void MacroAssembler::dec_held_monitor_count() { 3184 #ifndef _LP64 3185 Register thread = rax; 3186 push(thread); 3187 get_thread(thread); 3188 decrementl(Address(thread, JavaThread::held_monitor_count_offset())); 3189 pop(thread); 3190 #else // LP64 3191 decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 3192 #endif 3193 } 3194 3195 #ifdef ASSERT 3196 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) { 3197 #ifdef _LP64 3198 Label no_cont; 3199 movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset())); 3200 testl(cont, cont); 3201 jcc(Assembler::zero, no_cont); 3202 stop(name); 3203 bind(no_cont); 3204 #else 3205 Unimplemented(); 3206 #endif 3207 } 3208 #endif 3209 3210 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) { // determine java_thread register 3211 if (!java_thread->is_valid()) { 3212 java_thread = rdi; 3213 get_thread(java_thread); 3214 } 3215 // we must set sp to zero to clear frame 3216 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD); 3217 // must clear fp, so that compiled frames are not confused; it is 3218 // possible that we need it only for debugging 3219 if (clear_fp) { 3220 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); 3221 } 3222 // Always clear the pc because it could have been set by make_walkable() 3223 movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); 3224 vzeroupper(); 3225 } 3226 3227 void 
MacroAssembler::restore_rax(Register tmp) { 3228 if (tmp == noreg) pop(rax); 3229 else if (tmp != rax) mov(rax, tmp); 3230 } 3231 3232 void MacroAssembler::round_to(Register reg, int modulus) { 3233 addptr(reg, modulus - 1); 3234 andptr(reg, -modulus); 3235 } 3236 3237 void MacroAssembler::save_rax(Register tmp) { 3238 if (tmp == noreg) push(rax); 3239 else if (tmp != rax) mov(tmp, rax); 3240 } 3241 3242 void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod) { 3243 if (at_return) { 3244 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore, 3245 // we may safely use rsp instead to perform the stack watermark check. 3246 cmpptr(in_nmethod ? rsp : rbp, Address(thread_reg, JavaThread::polling_word_offset())); 3247 jcc(Assembler::above, slow_path); 3248 return; 3249 } 3250 testb(Address(thread_reg, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit()); 3251 jcc(Assembler::notZero, slow_path); // handshake bit set implies poll 3252 } 3253 3254 // Calls to C land 3255 // 3256 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded 3257 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp 3258 // has to be reset to 0. This is required to allow proper stack traversal. 3259 void MacroAssembler::set_last_Java_frame(Register java_thread, 3260 Register last_java_sp, 3261 Register last_java_fp, 3262 address last_java_pc, 3263 Register rscratch) { 3264 vzeroupper(); 3265 // determine java_thread register 3266 if (!java_thread->is_valid()) { 3267 java_thread = rdi; 3268 get_thread(java_thread); 3269 } 3270 // determine last_java_sp register 3271 if (!last_java_sp->is_valid()) { 3272 last_java_sp = rsp; 3273 } 3274 // last_java_fp is optional 3275 if (last_java_fp->is_valid()) { 3276 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp); 3277 } 3278 // last_java_pc is optional 3279 if (last_java_pc != nullptr) { 3280 Address java_pc(java_thread, 3281 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); 3282 lea(java_pc, InternalAddress(last_java_pc), rscratch); 3283 } 3284 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp); 3285 } 3286 3287 void MacroAssembler::shlptr(Register dst, int imm8) { 3288 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8)); 3289 } 3290 3291 void MacroAssembler::shrptr(Register dst, int imm8) { 3292 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8)); 3293 } 3294 3295 void MacroAssembler::sign_extend_byte(Register reg) { 3296 if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) { 3297 movsbl(reg, reg); // movsxb 3298 } else { 3299 shll(reg, 24); 3300 sarl(reg, 24); 3301 } 3302 } 3303 3304 void MacroAssembler::sign_extend_short(Register reg) { 3305 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 3306 movswl(reg, reg); // movsxw 3307 } else { 3308 shll(reg, 16); 3309 sarl(reg, 16); 3310 } 3311 } 3312 3313 void MacroAssembler::testl(Address dst, int32_t imm32) { 3314 if (imm32 >= 0 && is8bit(imm32)) { 3315 testb(dst, imm32); 3316 } else { 3317 Assembler::testl(dst, imm32); 3318 } 3319 } 3320 3321 void MacroAssembler::testl(Register dst, int32_t imm32) { 3322 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) { 3323 testb(dst, imm32); 3324 } else { 3325 Assembler::testl(dst, imm32); 3326 } 3327 } 3328 3329 void MacroAssembler::testl(Register dst, AddressLiteral src) { 3330 assert(always_reachable(src), "Address 
should be reachable"); 3331 testl(dst, as_Address(src)); 3332 } 3333 3334 #ifdef _LP64 3335 3336 void MacroAssembler::testq(Address dst, int32_t imm32) { 3337 if (imm32 >= 0) { 3338 testl(dst, imm32); 3339 } else { 3340 Assembler::testq(dst, imm32); 3341 } 3342 } 3343 3344 void MacroAssembler::testq(Register dst, int32_t imm32) { 3345 if (imm32 >= 0) { 3346 testl(dst, imm32); 3347 } else { 3348 Assembler::testq(dst, imm32); 3349 } 3350 } 3351 3352 #endif 3353 3354 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) { 3355 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3356 Assembler::pcmpeqb(dst, src); 3357 } 3358 3359 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) { 3360 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3361 Assembler::pcmpeqw(dst, src); 3362 } 3363 3364 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) { 3365 assert((dst->encoding() < 16),"XMM register should be 0-15"); 3366 Assembler::pcmpestri(dst, src, imm8); 3367 } 3368 3369 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { 3370 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3371 Assembler::pcmpestri(dst, src, imm8); 3372 } 3373 3374 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) { 3375 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3376 Assembler::pmovzxbw(dst, src); 3377 } 3378 3379 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) { 3380 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3381 Assembler::pmovzxbw(dst, src); 3382 } 3383 3384 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) { 3385 assert((src->encoding() < 16),"XMM register should be 0-15"); 3386 Assembler::pmovmskb(dst, src); 3387 } 3388 3389 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) { 3390 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3391 Assembler::ptest(dst, src); 3392 } 3393 3394 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3395 assert(rscratch != noreg || always_reachable(src), "missing"); 3396 3397 if (reachable(src)) { 3398 Assembler::sqrtss(dst, as_Address(src)); 3399 } else { 3400 lea(rscratch, src); 3401 Assembler::sqrtss(dst, Address(rscratch, 0)); 3402 } 3403 } 3404 3405 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3406 assert(rscratch != noreg || always_reachable(src), "missing"); 3407 3408 if (reachable(src)) { 3409 Assembler::subsd(dst, as_Address(src)); 3410 } else { 3411 lea(rscratch, src); 3412 Assembler::subsd(dst, Address(rscratch, 0)); 3413 } 3414 } 3415 3416 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) { 3417 assert(rscratch != noreg || always_reachable(src), "missing"); 3418 3419 if (reachable(src)) { 3420 Assembler::roundsd(dst, as_Address(src), rmode); 3421 } else { 3422 lea(rscratch, src); 3423 Assembler::roundsd(dst, Address(rscratch, 0), rmode); 3424 } 3425 } 3426 3427 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3428 assert(rscratch != noreg || always_reachable(src), "missing"); 3429 3430 if (reachable(src)) { 3431 Assembler::subss(dst, as_Address(src)); 3432 } 
else { 3433 lea(rscratch, src); 3434 Assembler::subss(dst, Address(rscratch, 0)); 3435 } 3436 } 3437 3438 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3439 assert(rscratch != noreg || always_reachable(src), "missing"); 3440 3441 if (reachable(src)) { 3442 Assembler::ucomisd(dst, as_Address(src)); 3443 } else { 3444 lea(rscratch, src); 3445 Assembler::ucomisd(dst, Address(rscratch, 0)); 3446 } 3447 } 3448 3449 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3450 assert(rscratch != noreg || always_reachable(src), "missing"); 3451 3452 if (reachable(src)) { 3453 Assembler::ucomiss(dst, as_Address(src)); 3454 } else { 3455 lea(rscratch, src); 3456 Assembler::ucomiss(dst, Address(rscratch, 0)); 3457 } 3458 } 3459 3460 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3461 assert(rscratch != noreg || always_reachable(src), "missing"); 3462 3463 // Used in sign-bit flipping with aligned address. 3464 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 3465 if (reachable(src)) { 3466 Assembler::xorpd(dst, as_Address(src)); 3467 } else { 3468 lea(rscratch, src); 3469 Assembler::xorpd(dst, Address(rscratch, 0)); 3470 } 3471 } 3472 3473 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) { 3474 if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) { 3475 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 3476 } 3477 else { 3478 Assembler::xorpd(dst, src); 3479 } 3480 } 3481 3482 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) { 3483 if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) { 3484 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 3485 } else { 3486 Assembler::xorps(dst, src); 3487 } 3488 } 3489 3490 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) { 3491 assert(rscratch != noreg || always_reachable(src), "missing"); 3492 3493 // Used in sign-bit flipping with aligned address. 3494 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 3495 if (reachable(src)) { 3496 Assembler::xorps(dst, as_Address(src)); 3497 } else { 3498 lea(rscratch, src); 3499 Assembler::xorps(dst, Address(rscratch, 0)); 3500 } 3501 } 3502 3503 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) { 3504 assert(rscratch != noreg || always_reachable(src), "missing"); 3505 3506 // Used in sign-bit flipping with aligned address. 
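// (Legacy-SSE pshufb requires its 128-bit memory operand to be 16-byte
//  aligned and faults with #GP otherwise; the VEX-encoded form has no such
//  requirement, hence the UseAVX escape in the assert below.)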
void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
  if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) {
    Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
  } else {
    Assembler::xorpd(dst, src);
  }
}

void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) {
  if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) {
    Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
  } else {
    Assembler::xorps(dst, src);
  }
}

void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  // Used in sign-bit flipping with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::xorps(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::xorps(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  // Used in sign-bit flipping with aligned address.
  bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
  assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::pshufb(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::pshufb(dst, Address(rscratch, 0));
  }
}

// AVX 3-operands instructions

void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vaddsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vaddsd(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vaddss(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vaddss(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(UseAVX > 0, "requires some form of AVX");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpaddb(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(UseAVX > 0, "requires some form of AVX");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpaddd(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()), "XMM register should be 0-15");
  assert(rscratch != noreg || always_reachable(negate_field), "missing");

  vandps(dst, nds, negate_field, vector_len, rscratch);
}

void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()), "XMM register should be 0-15");
  assert(rscratch != noreg || always_reachable(negate_field), "missing");

  vandpd(dst, nds, negate_field, vector_len, rscratch);
}

void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpaddb(dst, nds, src, vector_len);
}

void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpaddb(dst, nds, src, vector_len);
}

void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpaddw(dst, nds, src, vector_len);
}

void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpaddw(dst, nds, src, vector_len);
}

void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpand(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpbroadcastd(dst, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpbroadcastq(dst, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vbroadcastsd(dst, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vbroadcastss(dst, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len);
  }
}
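
// When EnableX86ECoreOpts is set, the variable blends below are emulated
// with an and/andn/or sequence instead of vblendvps/vblendvpd, provided a
// usable scratch register is available; see the availability checks in
// each path.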
// Vector float blend
// vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
  // WARN: Allow dst == (src1|src2), mask == scratch
  bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1;
  bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst;
  bool dst_available = dst != mask && (dst != src1 || dst != src2);
  if (blend_emulation && scratch_available && dst_available) {
    if (compute_mask) {
      vpsrad(scratch, mask, 32, vector_len);
      mask = scratch;
    }
    if (dst == src1) {
      vpandn(dst,     mask, src1, vector_len); // if mask == 0, src1
      vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
    } else {
      vpand (dst,     mask, src2, vector_len); // if mask == 1, src2
      vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
    }
    vpor(dst, dst, scratch, vector_len);
  } else {
    Assembler::vblendvps(dst, src1, src2, mask, vector_len);
  }
}

// vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
  // WARN: Allow dst == (src1|src2), mask == scratch
  bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1;
  bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask);
  bool dst_available = dst != mask && (dst != src1 || dst != src2);
  if (blend_emulation && scratch_available && dst_available) {
    if (compute_mask) {
      vpxor(scratch, scratch, scratch, vector_len);
      vpcmpgtq(scratch, scratch, mask, vector_len);
      mask = scratch;
    }
    if (dst == src1) {
      vpandn(dst,     mask, src1, vector_len); // if mask == 0, src1
      vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
    } else {
      vpand (dst,     mask, src2, vector_len); // if mask == 1, src2
      vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
    }
    vpor(dst, dst, scratch, vector_len);
  } else {
    Assembler::vblendvpd(dst, src1, src2, mask, vector_len);
  }
}

void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpcmpeqb(dst, nds, src, vector_len);
}

void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpcmpeqb(dst, src1, src2, vector_len);
}

void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpcmpeqw(dst, nds, src, vector_len);
}

void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpcmpeqw(dst, nds, src, vector_len);
}
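
// AVX-512 integer compares: the evpcmp* helpers below write their
// per-element results into an opmask register (kdst) under a mask, taking
// an explicit comparison predicate and signedness rather than a fixed
// condition.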
void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
                             int comparison, bool is_signed, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
  }
}

void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
                             int comparison, bool is_signed, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
  }
}

void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
                             int comparison, bool is_signed, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
  }
}

void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
                             int comparison, bool is_signed, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
  }
}
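
// Pre-AVX-512 packed compares only exist in eq and gt forms. vpcmpCCW
// below synthesizes the remaining predicates: neq and le invert the
// result of an eq/gt compare (XOR with all-ones), lt swaps the operands
// of gt, and nlt does both.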
void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) {
  if (width == Assembler::Q) {
    Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len);
  } else {
    Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len);
  }
}

void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) {
  int eq_cond_enc = 0x29;
  int gt_cond_enc = 0x37;
  if (width != Assembler::Q) {
    eq_cond_enc = 0x74 + width;
    gt_cond_enc = 0x64 + width;
  }
  switch (cond) {
  case eq:
    vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
    break;
  case neq:
    vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
    vallones(xtmp, vector_len);
    vpxor(dst, xtmp, dst, vector_len);
    break;
  case le:
    vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
    vallones(xtmp, vector_len);
    vpxor(dst, xtmp, dst, vector_len);
    break;
  case nlt:
    vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
    vallones(xtmp, vector_len);
    vpxor(dst, xtmp, dst, vector_len);
    break;
  case lt:
    vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
    break;
  case nle:
    vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
    break;
  default:
    assert(false, "Should not reach here");
  }
}

void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
  assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpmovzxbw(dst, src, vector_len);
}

void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) {
  assert((src->encoding() < 16), "XMM register should be 0-15");
  Assembler::vpmovmskb(dst, src, vector_len);
}

void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpmullw(dst, nds, src, vector_len);
}

void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpmullw(dst, nds, src, vector_len);
}

void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert((UseAVX > 0), "AVX support is needed");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpmulld(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubb(dst, nds, src, vector_len);
}

void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubb(dst, nds, src, vector_len);
}

void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubw(dst, nds, src, vector_len);
}

void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubw(dst, nds, src, vector_len);
}

void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsraw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsraw(dst, nds, shift, vector_len);
}
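
// evpsraq is AVX-512 only; without AVX512VL the 128/256-bit forms are
// unavailable, so the operation is widened to 512 bits.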
void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(UseAVX > 2, "");
  if (!VM_Version::supports_avx512vl() && vector_len < 2) {
    vector_len = 2;
  }
  Assembler::evpsraq(dst, nds, shift, vector_len);
}

void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(UseAVX > 2, "");
  if (!VM_Version::supports_avx512vl() && vector_len < 2) {
    vector_len = 2;
  }
  Assembler::evpsraq(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsrlw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsrlw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsllw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsllw(dst, nds, shift, vector_len);
}

void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) {
  assert((dst->encoding() < 16 && src->encoding() < 16), "XMM register should be 0-15");
  Assembler::vptest(dst, src);
}

void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::punpcklbw(dst, src);
}

void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::pshufd(dst, src, mode);
}

void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::pshuflw(dst, src, mode);
}

void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vandpd(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    vandpd(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vandps(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    vandps(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src,
                            bool merge, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
  }
}

void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vdivsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vdivsd(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vdivss(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vdivss(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vmulsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vmulsd(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vmulss(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vmulss(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vsubsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vsubsd(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vsubss(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vsubss(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()), "XMM register should be 0-15");
  assert(rscratch != noreg || always_reachable(src), "missing");

  vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch);
}

void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()), "XMM register should be 0-15");
  assert(rscratch != noreg || always_reachable(src), "missing");

  vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch);
}

void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vxorpd(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    vxorpd(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vxorps(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    vxorps(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (UseAVX > 1 || (vector_len < 1)) {
    if (reachable(src)) {
      Assembler::vpxor(dst, nds, as_Address(src), vector_len);
    } else {
      lea(rscratch, src);
      Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len);
    }
  } else {
    MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch);
  }
}

void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpermd(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len);
  }
}
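
// JNI handle support: jobjects are tagged pointers whose low bits
// (JNIHandles::tag_mask) distinguish local, global and weak-global
// handles; the tag must be cleared or compensated for before the handle
// can be dereferenced.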
void MacroAssembler::clear_jobject_tag(Register possibly_non_local) {
  const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask);
  STATIC_ASSERT(inverted_mask == -4); // otherwise check this code
  // The inverted mask is sign-extended
  andptr(possibly_non_local, inverted_mask);
}

void MacroAssembler::resolve_jobject(Register value,
                                     Register thread,
                                     Register tmp) {
  assert_different_registers(value, thread, tmp);
  Label done, tagged, weak_tagged;
  testptr(value, value);
  jcc(Assembler::zero, done);            // Use null as-is.
  testptr(value, JNIHandles::tag_mask);  // Test for tag.
  jcc(Assembler::notZero, tagged);

  // Resolve local handle
  access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp, thread);
  verify_oop(value);
  jmp(done);

  bind(tagged);
  testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag.
  jcc(Assembler::notZero, weak_tagged);

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread);
  verify_oop(value);
  jmp(done);

  bind(weak_tagged);
  // Resolve jweak.
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp, thread);
  verify_oop(value);

  bind(done);
}
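
// Like resolve_jobject above, but the handle must carry the (strong)
// global tag, which is checked under ASSERT before the load.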
void MacroAssembler::resolve_global_jobject(Register value,
                                            Register thread,
                                            Register tmp) {
  assert_different_registers(value, thread, tmp);
  Label done;

  testptr(value, value);
  jcc(Assembler::zero, done); // Use null as-is.

#ifdef ASSERT
  {
    Label valid_global_tag;
    testptr(value, JNIHandles::TypeTag::global); // Test for global tag.
    jcc(Assembler::notZero, valid_global_tag);
    stop("non global jobject using resolve_global_jobject");
    bind(valid_global_tag);
  }
#endif

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread);
  verify_oop(value);

  bind(done);
}

void MacroAssembler::subptr(Register dst, int32_t imm32) {
  LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
  LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
}

void MacroAssembler::subptr(Register dst, Register src) {
  LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
}

// C++ bool manipulation
void MacroAssembler::testbool(Register dst) {
  if (sizeof(bool) == 1) {
    testb(dst, 0xff);
  } else if (sizeof(bool) == 2) {
    // testw implementation needed for two byte bools
    ShouldNotReachHere();
  } else if (sizeof(bool) == 4) {
    testl(dst, dst);
  } else {
    // unsupported
    ShouldNotReachHere();
  }
}

void MacroAssembler::testptr(Register dst, Register src) {
  LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
}

// Object / value buffer allocation...
//
// Kills klass and rsi on LP64
void MacroAssembler::allocate_instance(Register klass, Register new_obj,
                                       Register t1, Register t2,
                                       bool clear_fields, Label& alloc_failed)
{
  Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
  Register layout_size = t1;
  assert(new_obj == rax, "needs to be rax");
  assert_different_registers(klass, new_obj, t1, t2);

  // get instance_size in InstanceKlass (scaled to a count of bytes)
  movl(layout_size, Address(klass, Klass::layout_helper_offset()));
  // test to see if it is malformed in some way
  testl(layout_size, Klass::_lh_instance_slow_path_bit);
  jcc(Assembler::notZero, slow_case_no_pop);

  // Allocate the instance:
  //  If TLAB is enabled:
  //    Try to allocate in the TLAB.
  //    If fails, go to the slow path.
  //  Else If inline contiguous allocations are enabled:
  //    Try to allocate in eden.
  //    If fails due to heap end, go to slow path.
  //
  //  If TLAB is enabled OR inline contiguous is enabled:
  //    Initialize the allocation.
  //    Exit.
  //
  //  Go to slow path.

  push(klass);
  const Register thread = LP64_ONLY(r15_thread) NOT_LP64(klass);
#ifndef _LP64
  if (UseTLAB) {
    get_thread(thread);
  }
#endif // _LP64

  if (UseTLAB) {
    tlab_allocate(thread, new_obj, layout_size, 0, klass, t2, slow_case);
    if (ZeroTLAB || (!clear_fields)) {
      // the fields have been already cleared
      jmp(initialize_header);
    } else {
      // initialize both the header and fields
      jmp(initialize_object);
    }
  } else {
    jmp(slow_case);
  }

  // If UseTLAB is true, the object was created above and needs to be
  // initialized; otherwise, skip to the slow path.
  if (UseTLAB) {
    if (clear_fields) {
      // The object is initialized before the header. If the object size is
      // zero, go directly to the header initialization.
      bind(initialize_object);
      decrement(layout_size, sizeof(oopDesc));
      jcc(Assembler::zero, initialize_header);

      // Initialize topmost object field, divide size by 8, check if odd and
      // test if zero.
      Register zero = klass;
      xorl(zero, zero); // use zero reg to clear memory (shorter code)
      shrl(layout_size, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd

#ifdef ASSERT
      // make sure instance_size was multiple of 8
      Label L;
      // Ignore partial flag stall after shrl() since it is debug VM
      jcc(Assembler::carryClear, L);
      stop("object size is not multiple of 2 - adjust this code");
      bind(L);
      // must be > 0, no extra check needed here
#endif

      // initialize remaining object fields: instance_size was a multiple of 8
      {
        Label loop;
        bind(loop);
        movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 1*oopSize), zero);
        NOT_LP64(movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 2*oopSize), zero));
        decrement(layout_size);
        jcc(Assembler::notZero, loop);
      }
    } // clear_fields

    // initialize object header only.
    bind(initialize_header);
    pop(klass);
    Register mark_word = t2;
    movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
    movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()), mark_word);
#ifdef _LP64
    xorl(rsi, rsi);                // use zero reg to clear memory (shorter code)
    store_klass_gap(new_obj, rsi); // zero klass gap for compressed oops
#endif
    movptr(t2, klass);                    // preserve klass
    store_klass(new_obj, t2, rscratch1);  // src klass reg is potentially compressed

    jmp(done);
  }

  bind(slow_case);
  pop(klass);
  bind(slow_case_no_pop);
  jmp(alloc_failed);

  bind(done);
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register thread, Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
}
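
// The register sets below describe the call-clobbered (caller-saved)
// registers of the platform C ABI and feed push/pop_call_clobbered_registers.
// Note that the Windows x64 ABI treats rsi and rdi as callee-saved.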
RegSet MacroAssembler::call_clobbered_gp_registers() {
  RegSet regs;
#ifdef _LP64
  regs += RegSet::of(rax, rcx, rdx);
#ifndef WINDOWS
  regs += RegSet::of(rsi, rdi);
#endif
  regs += RegSet::range(r8, r11);
#else
  regs += RegSet::of(rax, rcx, rdx);
#endif
#ifdef _LP64
  if (UseAPX) {
    regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));
  }
#endif
  return regs;
}

XMMRegSet MacroAssembler::call_clobbered_xmm_registers() {
  int num_xmm_registers = XMMRegister::available_xmm_registers();
#if defined(WINDOWS) && defined(_LP64)
  XMMRegSet result = XMMRegSet::range(xmm0, xmm5);
  if (num_xmm_registers > 16) {
    result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1));
  }
  return result;
#else
  return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1));
#endif
}

static int FPUSaveAreaSize = align_up(108, StackAlignmentInBytes); // 108 bytes needed for FPU state by fsave/frstor

#ifndef _LP64
static bool use_x87_registers() { return UseSSE < 2; }
#endif
static bool use_xmm_registers() { return UseSSE >= 1; }

// C1 only ever uses the first double/float of the XMM register.
static int xmm_save_size() { return UseSSE >= 2 ? sizeof(double) : sizeof(float); }

static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
  if (UseSSE == 1) {
    masm->movflt(Address(rsp, offset), reg);
  } else {
    masm->movdbl(Address(rsp, offset), reg);
  }
}

static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
  if (UseSSE == 1) {
    masm->movflt(reg, Address(rsp, offset));
  } else {
    masm->movdbl(reg, Address(rsp, offset));
  }
}

static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers,
                                  bool save_fpu, int& gp_area_size,
                                  int& fp_area_size, int& xmm_area_size) {

  gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
                          StackAlignmentInBytes);
#ifdef _LP64
  fp_area_size = 0;
#else
  fp_area_size = (save_fpu && use_x87_registers()) ? FPUSaveAreaSize : 0;
#endif
  xmm_area_size = (save_fpu && use_xmm_registers()) ? xmm_registers.size() * xmm_save_size() : 0;

  return gp_area_size + fp_area_size + xmm_area_size;
}
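
// The save area built below is laid out as [gp registers][x87 state]
// [xmm registers], with section sizes and alignment computed by
// register_section_sizes() above.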
void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) {
  block_comment("push_call_clobbered_registers start");
  // Regular registers
  RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude;

  int gp_area_size;
  int fp_area_size;
  int xmm_area_size;
  int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu,
                                               gp_area_size, fp_area_size, xmm_area_size);
  subptr(rsp, total_save_size);

  push_set(gp_registers_to_push, 0);

#ifndef _LP64
  if (save_fpu && use_x87_registers()) {
    fnsave(Address(rsp, gp_area_size));
    fwait();
  }
#endif
  if (save_fpu && use_xmm_registers()) {
    push_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size);
  }

  block_comment("push_call_clobbered_registers end");
}

void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) {
  block_comment("pop_call_clobbered_registers start");

  RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude;

  int gp_area_size;
  int fp_area_size;
  int xmm_area_size;
  int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu,
                                               gp_area_size, fp_area_size, xmm_area_size);

  if (restore_fpu && use_xmm_registers()) {
    pop_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size);
  }
#ifndef _LP64
  if (restore_fpu && use_x87_registers()) {
    frstor(Address(rsp, gp_area_size));
  }
#endif

  pop_set(gp_registers_to_pop, 0);

  addptr(rsp, total_save_size);

  vzeroupper();

  block_comment("pop_call_clobbered_registers end");
}

void MacroAssembler::push_set(XMMRegSet set, int offset) {
  assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be");
  int spill_offset = offset;

  for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) {
    save_xmm_register(this, spill_offset, *it);
    spill_offset += xmm_save_size();
  }
}

void MacroAssembler::pop_set(XMMRegSet set, int offset) {
  int restore_size = set.size() * xmm_save_size();
  assert(is_aligned(restore_size, StackAlignmentInBytes), "must be");

  int restore_offset = offset + restore_size - xmm_save_size();

  for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) {
    restore_xmm_register(this, restore_offset, *it);
    restore_offset -= xmm_save_size();
  }
}

void MacroAssembler::push_set(RegSet set, int offset) {
  int spill_offset;
  if (offset == -1) {
    int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
    int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
    subptr(rsp, aligned_size);
    spill_offset = 0;
  } else {
    spill_offset = offset;
  }

  for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) {
    movptr(Address(rsp, spill_offset), *it);
    spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size;
  }
}

void MacroAssembler::pop_set(RegSet set, int offset) {

  int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size;
  int restore_size = set.size() * gp_reg_size;
  int aligned_size = align_up(restore_size, StackAlignmentInBytes);

  int restore_offset;
  if (offset == -1) {
    restore_offset = restore_size - gp_reg_size;
  } else {
    restore_offset = offset + restore_size - gp_reg_size;
  }
  for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) {
    movptr(*it, Address(rsp, restore_offset));
    restore_offset -= gp_reg_size;
  }

  if (offset == -1) {
    addptr(rsp, aligned_size);
  }
}

// Preserves the contents of address, destroys the contents of length_in_bytes and temp.
void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
  assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
  assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
  Label done;

  testptr(length_in_bytes, length_in_bytes);
  jcc(Assembler::zero, done);

  // initialize topmost word, divide index by 2, check if odd and test if zero
  // note: for the remaining code to work, index must be a multiple of BytesPerWord
#ifdef ASSERT
  {
    Label L;
    testptr(length_in_bytes, BytesPerWord - 1);
    jcc(Assembler::zero, L);
    stop("length must be a multiple of BytesPerWord");
    bind(L);
  }
#endif
  Register index = length_in_bytes;
  xorptr(temp, temp);  // use zero reg to clear memory (shorter code)
  if (UseIncDec) {
    shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
  } else {
    shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
    shrptr(index, 1);
  }
#ifndef _LP64
  // index might not have been a multiple of 8 (i.e., bit 2 was set)
  {
    Label even;
    // note: if index was a multiple of 8, then it cannot
    //       be 0 now otherwise it must have been 0 before
    //       => if it is even, we don't need to check for 0 again
    jcc(Assembler::carryClear, even);
    // clear topmost word (no jump would be needed if conditional assignment worked here)
    movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
    // index could be 0 now, must check again
    jcc(Assembler::zero, done);
    bind(even);
  }
#endif // !_LP64
  // initialize remaining object fields: index is a multiple of 2 now
  {
    Label loop;
    bind(loop);
    movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
    NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
    decrement(index);
    jcc(Assembler::notZero, loop);
  }

  bind(done);
}
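
// Valhalla inline-type helpers: look up the InlineKlass of a field by its
// index in the holder's inline_type_field_klasses array, and fetch the
// pre-allocated default value an InlineKlass exposes through its mirror.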
void MacroAssembler::get_inline_type_field_klass(Register klass, Register index, Register inline_klass) {
  movptr(inline_klass, Address(klass, InstanceKlass::inline_type_field_klasses_offset()));
#ifdef ASSERT
  {
    Label done;
    cmpptr(inline_klass, 0);
    jcc(Assembler::notEqual, done);
    stop("get_inline_type_field_klass contains no inline klass");
    bind(done);
  }
#endif
  movptr(inline_klass, Address(inline_klass, index, Address::times_ptr, Array<InlineKlass*>::base_offset_in_bytes()));
}

void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) {
#ifdef ASSERT
  {
    Label done_check;
    test_klass_is_inline_type(inline_klass, temp_reg, done_check);
    stop("get_default_value_oop from non inline type klass");
    bind(done_check);
  }
#endif
  Register offset = temp_reg;
  // Getting the offset of the pre-allocated default value
  movptr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())));
  movl(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset())));

  // Getting the mirror
  movptr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset())));
  resolve_oop_handle(obj, inline_klass);

  // Getting the pre-allocated default value from the mirror
  Address field(obj, offset, Address::times_1);
  load_heap_oop(obj, field);
}

void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) {
#ifdef ASSERT
  {
    Label done_check;
    test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check);
    stop("get_empty_value from non-empty inline klass");
    bind(done_check);
  }
#endif
  get_default_value_oop(inline_klass, temp_reg, obj);
}


// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");

  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  Address::ScaleFactor times_vte_scale = Address::times_ptr;
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  for (int peel = 1; peel >= 0; peel--) {
    movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
    cmpptr(intf_klass, method_result);

    if (peel) {
      jccb(Assembler::equal, found_method);
    } else {
      jccb(Assembler::notEqual, search);
      // (invert the test to fall through to found_method...)
    }

    if (!peel) break;

    bind(search);

    // Check that the previous entry is non-null. A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    testptr(method_result, method_result);
    jcc(Assembler::zero, L_no_such_interface);
    addptr(scan_temp, scan_step);
  }

  bind(found_method);

  if (return_method) {
    // Got a hit.
    movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
  }
}

// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register scan_temp,
                                                  Register temp_reg2,
                                                  Register receiver,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver);
  Register temp_itbl_klass = method_result;
  Register temp_reg = (temp_reg2 == noreg ? recv_klass : temp_reg2); // reuse recv_klass register on 32-bit x86 impl

  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());
  Address::ScaleFactor times_vte_scale = Address::times_ptr;
  assert(vte_size == wordSize, "adjust times_vte_scale");

  Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found;

  // temp_itbl_klass = recv_klass.itable[0]
  // scan_temp = &recv_klass.itable[0] + step
  movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset));
  lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step));
  xorptr(temp_reg, temp_reg);

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == 0), no such interface
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  cmpptr(holder_klass, resolved_klass);
  jccb(Assembler::notEqual, L_loop_scan_resolved_entry);
  testptr(temp_itbl_klass, temp_itbl_klass);
  jccb(Assembler::zero, L_no_such_interface);
  cmpptr(holder_klass, temp_itbl_klass);
  jccb(Assembler::equal, L_holder_found);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     tmp = itable[index];
  //     index += step;
  //     if (tmp == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (tmp != 0);
  //   goto L_no_such_interface // Not found.
  Label L_scan_holder;
  bind(L_scan_holder);
  movptr(temp_itbl_klass, Address(scan_temp, 0));
  addptr(scan_temp, scan_step);
  cmpptr(holder_klass, temp_itbl_klass);
  jccb(Assembler::equal, L_holder_found);
  testptr(temp_itbl_klass, temp_itbl_klass);
  jccb(Assembler::notZero, L_scan_holder);

  jmpb(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   do {
  //     tmp = itable[index];
  //     index += step;
  //     if (tmp == holder_klass) {
  //       // Also check if we have met a holder klass
  //       holder_tmp = itable[index-step-ioffset];
  //     }
  //     if (tmp == resolved_klass) {
  //       goto L_resolved_found;  // Found!
  //     }
  //   } while (tmp != 0);
  //   goto L_no_such_interface // Not found.
  //
  Label L_loop_scan_resolved;
  bind(L_loop_scan_resolved);
  movptr(temp_itbl_klass, Address(scan_temp, 0));
  addptr(scan_temp, scan_step);
  bind(L_loop_scan_resolved_entry);
  cmpptr(holder_klass, temp_itbl_klass);
  cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
  cmpptr(resolved_klass, temp_itbl_klass);
  jccb(Assembler::equal, L_resolved_found);
  testptr(temp_itbl_klass, temp_itbl_klass);
  jccb(Assembler::notZero, L_loop_scan_resolved);

  jmpb(L_no_such_interface);

  Label L_ready;

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  testptr(temp_reg, temp_reg);
  jccb(Assembler::zero, L_scan_holder);
  jmpb(L_ready);

  bind(L_holder_found);
  movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));

  // Finally, temp_reg contains holder_klass vtable offset
  bind(L_ready);
  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  if (temp_reg2 == noreg) { // recv_klass register is clobbered for 32-bit x86 impl
    load_klass(scan_temp, receiver, noreg);
    movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
  } else {
    movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
  }
}


// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  const ByteSize base = Klass::vtable_start_offset();
  assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
  Address vtable_entry_addr(recv_klass,
                            vtable_index, Address::times_ptr,
                            base + vtableEntry::method_offset());
  movptr(method_result, vtable_entry_addr);
}


void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}
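
// The fast path handles the two cheap subtype cases: sub_klass equals
// super_klass, or super_klass appears in sub_klass's supertype display at
// super_check_offset. If that offset aliases the secondary super cache,
// the answer may instead require the slow path's scan of the secondary
// supers array.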
void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jcc, which "knows" that L_fallthrough, at least, is in
  // range of a jccb. If this routine grows larger, reconsider at
  // least some of these.
#define local_jcc(assembler_cond, label)                        \
  if (&(label) == &L_fallthrough)  jccb(assembler_cond, label); \
  else                             jcc( assembler_cond, label) /*omit semi*/

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }    \
  else                            jmp(label)            /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmpptr(sub_klass, super_klass);
  local_jcc(Assembler::equal, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    // Positive movl does right thing on LP64.
    movl(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
  cmpptr(super_klass, super_check_addr); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    local_jcc(Assembler::equal, *L_success);
    cmpl(super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_slow_path);
    } else {
      local_jcc(Assembler::notEqual, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_success);
    } else {
      local_jcc(Assembler::notEqual, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_success);
    } else {
      local_jcc(Assembler::notEqual, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef local_jcc
#undef final_jmp
}
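
// The slow path scans sub_klass's secondary supers array linearly with
// repne scas (hence the fixed rax/rcx/rdi usage, spilled as needed) and,
// on a hit, caches the found super in the secondary super cache.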
4937 if (L_failure == &L_fallthrough) { 4938 local_jcc(Assembler::equal, *L_success); 4939 } else { 4940 local_jcc(Assembler::notEqual, *L_failure); 4941 final_jmp(*L_success); 4942 } 4943 } 4944 4945 bind(L_fallthrough); 4946 4947 #undef local_jcc 4948 #undef final_jmp 4949 } 4950 4951 4952 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 4953 Register super_klass, 4954 Register temp_reg, 4955 Register temp2_reg, 4956 Label* L_success, 4957 Label* L_failure, 4958 bool set_cond_codes) { 4959 assert_different_registers(sub_klass, super_klass, temp_reg); 4960 if (temp2_reg != noreg) 4961 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg); 4962 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 4963 4964 Label L_fallthrough; 4965 int label_nulls = 0; 4966 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4967 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4968 assert(label_nulls <= 1, "at most one null in the batch"); 4969 4970 // a couple of useful fields in sub_klass: 4971 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 4972 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 4973 Address secondary_supers_addr(sub_klass, ss_offset); 4974 Address super_cache_addr( sub_klass, sc_offset); 4975 4976 // Do a linear scan of the secondary super-klass chain. 4977 // This code is rarely used, so simplicity is a virtue here. 4978 // The repne_scan instruction uses fixed registers, which we must spill. 4979 // Don't worry too much about pre-existing connections with the input regs. 4980 4981 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super) 4982 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter) 4983 4984 // Get super_klass value into rax (even if it was in rdi or rcx). 4985 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false; 4986 if (super_klass != rax) { 4987 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; } 4988 mov(rax, super_klass); 4989 } 4990 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; } 4991 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; } 4992 4993 #ifndef PRODUCT 4994 uint* pst_counter = &SharedRuntime::_partial_subtype_ctr; 4995 ExternalAddress pst_counter_addr((address) pst_counter); 4996 NOT_LP64( incrementl(pst_counter_addr) ); 4997 LP64_ONLY( lea(rcx, pst_counter_addr) ); 4998 LP64_ONLY( incrementl(Address(rcx, 0)) ); 4999 #endif //PRODUCT 5000 5001 // We will consult the secondary-super array. 5002 movptr(rdi, secondary_supers_addr); 5003 // Load the array length. (Positive movl does right thing on LP64.) 5004 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes())); 5005 // Skip to start of data. 5006 addptr(rdi, Array<Klass*>::base_offset_in_bytes()); 5007 5008 // Scan RCX words at [RDI] for an occurrence of RAX. 5009 // Set NZ/Z based on last compare. 5010 // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does 5011 // not change flags (only scas instruction which is repeated sets flags). 5012 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found. 5013 5014 testptr(rax,rax); // Set Z = 0 5015 repne_scan(); 5016 5017 // Unspill the temp. registers: 5018 if (pushed_rdi) pop(rdi); 5019 if (pushed_rcx) pop(rcx); 5020 if (pushed_rax) pop(rax); 5021 5022 if (set_cond_codes) { 5023 // Special hack for the AD files: rdi is guaranteed non-zero. 
    assert(!pushed_rdi, "rdi must be left non-null");
    // Also, the condition codes are properly set Z/NZ on succeed/failure.
  }

  if (L_failure == &L_fallthrough)
        jccb(Assembler::notEqual, *L_failure);
  else    jcc(Assembler::notEqual, *L_failure);

  // Success.  Cache the super we found and proceed in triumph.
  movptr(super_cache_addr, super_klass);

  if (L_success != &L_fallthrough) {
    jmp(*L_success);
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}

#ifdef _LP64

// population_count variant for running without the POPCNT
// instruction, which was introduced with SSE4.2 in 2008.
void MacroAssembler::population_count(Register dst, Register src,
                                      Register scratch1, Register scratch2) {
  assert_different_registers(src, scratch1, scratch2);
  if (UsePopCountInstruction) {
    Assembler::popcntq(dst, src);
  } else {
    assert_different_registers(dst, scratch1, scratch2);
    Label loop, done;

    mov(scratch1, src);
    // dst = 0;
    // while(scratch1 != 0) {
    //   dst++;
    //   scratch1 &= (scratch1 - 1);
    // }
    xorl(dst, dst);
    testq(scratch1, scratch1);
    jccb(Assembler::equal, done);
    {
      bind(loop);
      incq(dst);
      movq(scratch2, scratch1);
      decq(scratch2);
      andq(scratch1, scratch2);
      jccb(Assembler::notEqual, loop);
    }
    bind(done);
  }
}

// Ensure that the inline code and the stub are using the same registers.
#define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS                      \
do {                                                                 \
  assert(r_super_klass  == rax, "mismatch");                         \
  assert(r_array_base   == rbx, "mismatch");                         \
  assert(r_array_length == rcx, "mismatch");                         \
  assert(r_array_index  == rdx, "mismatch");                         \
  assert(r_sub_klass    == rsi || r_sub_klass == noreg, "mismatch"); \
  assert(r_bitmap       == r11 || r_bitmap    == noreg, "mismatch"); \
  assert(result         == rdi || result      == noreg, "mismatch"); \
} while(0)

void MacroAssembler::lookup_secondary_supers_table(Register r_sub_klass,
                                                   Register r_super_klass,
                                                   Register temp1,
                                                   Register temp2,
                                                   Register temp3,
                                                   Register temp4,
                                                   Register result,
                                                   u1 super_klass_slot) {
  assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);

  Label L_fallthrough, L_success, L_failure;

  BLOCK_COMMENT("lookup_secondary_supers_table {");

  const Register
    r_array_index  = temp1,
    r_array_length = temp2,
    r_array_base   = temp3,
    r_bitmap       = temp4;

  LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;

  xorq(result, result); // = 0

  movq(r_bitmap, Address(r_sub_klass, Klass::bitmap_offset()));
  movq(r_array_index, r_bitmap);

  // First check the bitmap to see if super_klass might be present. If
  // the bit is zero, we are certain that super_klass is not one of
  // the secondary supers.
  u1 bit = super_klass_slot;
  {
    // NB: If the count in an x86 shift instruction is 0, the flags are
    // not affected, so we do a testq instead.
    int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit;
    if (shift_count != 0) {
      salq(r_array_index, shift_count);
    } else {
      testq(r_array_index, r_array_index);
    }
  }
  // We test the MSB of r_array_index, i.e.
its sign bit 5133 jcc(Assembler::positive, L_failure); 5134 5135 // Get the first array index that can contain super_klass into r_array_index. 5136 if (bit != 0) { 5137 population_count(r_array_index, r_array_index, temp2, temp3); 5138 } else { 5139 movl(r_array_index, 1); 5140 } 5141 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 5142 5143 // We will consult the secondary-super array. 5144 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 5145 5146 // We're asserting that the first word in an Array<Klass*> is the 5147 // length, and the second word is the first word of the data. If 5148 // that ever changes, r_array_base will have to be adjusted here. 5149 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 5150 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 5151 5152 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 5153 jccb(Assembler::equal, L_success); 5154 5155 // Is there another entry to check? Consult the bitmap. 5156 btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK); 5157 jccb(Assembler::carryClear, L_failure); 5158 5159 // Linear probe. Rotate the bitmap so that the next bit to test is 5160 // in Bit 1. 5161 if (bit != 0) { 5162 rorq(r_bitmap, bit); 5163 } 5164 5165 // Calls into the stub generated by lookup_secondary_supers_table_slow_path. 5166 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap. 5167 // Kills: r_array_length. 5168 // Returns: result. 5169 call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub())); 5170 // Result (0/1) is in rdi 5171 jmpb(L_fallthrough); 5172 5173 bind(L_failure); 5174 incq(result); // 0 => 1 5175 5176 bind(L_success); 5177 // result = 0; 5178 5179 bind(L_fallthrough); 5180 BLOCK_COMMENT("} lookup_secondary_supers_table"); 5181 5182 if (VerifySecondarySupers) { 5183 verify_secondary_supers_table(r_sub_klass, r_super_klass, result, 5184 temp1, temp2, temp3); 5185 } 5186 } 5187 5188 void MacroAssembler::repne_scanq(Register addr, Register value, Register count, Register limit, 5189 Label* L_success, Label* L_failure) { 5190 Label L_loop, L_fallthrough; 5191 { 5192 int label_nulls = 0; 5193 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 5194 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 5195 assert(label_nulls <= 1, "at most one null in the batch"); 5196 } 5197 bind(L_loop); 5198 cmpq(value, Address(addr, count, Address::times_8)); 5199 jcc(Assembler::equal, *L_success); 5200 addl(count, 1); 5201 cmpl(count, limit); 5202 jcc(Assembler::less, L_loop); 5203 5204 if (&L_fallthrough != L_failure) { 5205 jmp(*L_failure); 5206 } 5207 bind(L_fallthrough); 5208 } 5209 5210 // Called by code generated by check_klass_subtype_slow_path 5211 // above. This is called when there is a collision in the hashed 5212 // lookup in the secondary supers array. 
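// (Concretely, the call arrives through the stub installed at
// StubRoutines::lookup_secondary_supers_table_slow_path_stub(), which is
// invoked by lookup_secondary_supers_table above when the first probe
// misses but the bitmap indicates another candidate entry.)
//
// A rough C-style sketch of the probing loop that follows, for orientation
// only (helper names are illustrative, not real functions):
//
//   for (;;) {
//     if (index >= length) index = 0;                  // wrap around
//     if (array[index] == super) return found;
//     if (((bitmap >> 2) & 1) == 0) return not_found;  // next slot empty
//     bitmap = rotate_right(bitmap, 1);
//     index++;
//   }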
5213 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass, 5214 Register r_array_base, 5215 Register r_array_index, 5216 Register r_bitmap, 5217 Register temp1, 5218 Register temp2, 5219 Label* L_success, 5220 Label* L_failure) { 5221 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2); 5222 5223 const Register 5224 r_array_length = temp1, 5225 r_sub_klass = noreg, 5226 result = noreg; 5227 5228 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 5229 5230 Label L_fallthrough; 5231 int label_nulls = 0; 5232 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 5233 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 5234 assert(label_nulls <= 1, "at most one null in the batch"); 5235 5236 // Load the array length. 5237 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 5238 // And adjust the array base to point to the data. 5239 // NB! Effectively increments current slot index by 1. 5240 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, ""); 5241 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes()); 5242 5243 // Linear probe 5244 Label L_huge; 5245 5246 // The bitmap is full to bursting. 5247 // Implicit invariant: BITMAP_FULL implies (length > 0) 5248 cmpl(r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2); 5249 jcc(Assembler::greater, L_huge); 5250 5251 // NB! Our caller has checked bits 0 and 1 in the bitmap. The 5252 // current slot (at secondary_supers[r_array_index]) has not yet 5253 // been inspected, and r_array_index may be out of bounds if we 5254 // wrapped around the end of the array. 5255 5256 { // This is conventional linear probing, but instead of terminating 5257 // when a null entry is found in the table, we maintain a bitmap 5258 // in which a 0 indicates missing entries. 5259 // The check above guarantees there are 0s in the bitmap, so the loop 5260 // eventually terminates. 5261 5262 xorl(temp2, temp2); // = 0; 5263 5264 Label L_again; 5265 bind(L_again); 5266 5267 // Check for array wraparound. 5268 cmpl(r_array_index, r_array_length); 5269 cmovl(Assembler::greaterEqual, r_array_index, temp2); 5270 5271 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 5272 jcc(Assembler::equal, *L_success); 5273 5274 // If the next bit in bitmap is zero, we're done. 5275 btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now 5276 jcc(Assembler::carryClear, *L_failure); 5277 5278 rorq(r_bitmap, 1); // Bits 1/2 => 0/1 5279 addl(r_array_index, 1); 5280 5281 jmp(L_again); 5282 } 5283 5284 { // Degenerate case: more than 64 secondary supers. 5285 // FIXME: We could do something smarter here, maybe a vectorized 5286 // comparison or a binary search, but is that worth any added 5287 // complexity? 5288 bind(L_huge); 5289 xorl(r_array_index, r_array_index); // = 0 5290 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, 5291 L_success, 5292 (&L_fallthrough != L_failure ? 
L_failure : nullptr)); 5293 5294 bind(L_fallthrough); 5295 } 5296 } 5297 5298 struct VerifyHelperArguments { 5299 Klass* _super; 5300 Klass* _sub; 5301 intptr_t _linear_result; 5302 intptr_t _table_result; 5303 }; 5304 5305 static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) { 5306 Klass::on_secondary_supers_verification_failure(args->_super, 5307 args->_sub, 5308 args->_linear_result, 5309 args->_table_result, 5310 msg); 5311 } 5312 5313 // Make sure that the hashed lookup and a linear scan agree. 5314 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass, 5315 Register r_super_klass, 5316 Register result, 5317 Register temp1, 5318 Register temp2, 5319 Register temp3) { 5320 const Register 5321 r_array_index = temp1, 5322 r_array_length = temp2, 5323 r_array_base = temp3, 5324 r_bitmap = noreg; 5325 5326 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 5327 5328 BLOCK_COMMENT("verify_secondary_supers_table {"); 5329 5330 Label L_success, L_failure, L_check, L_done; 5331 5332 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 5333 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 5334 // And adjust the array base to point to the data. 5335 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes()); 5336 5337 testl(r_array_length, r_array_length); // array_length == 0? 5338 jcc(Assembler::zero, L_failure); 5339 5340 movl(r_array_index, 0); 5341 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success); 5342 // fall through to L_failure 5343 5344 const Register linear_result = r_array_index; // reuse temp1 5345 5346 bind(L_failure); // not present 5347 movl(linear_result, 1); 5348 jmp(L_check); 5349 5350 bind(L_success); // present 5351 movl(linear_result, 0); 5352 5353 bind(L_check); 5354 cmpl(linear_result, result); 5355 jcc(Assembler::equal, L_done); 5356 5357 { // To avoid calling convention issues, build a record on the stack 5358 // and pass the pointer to that instead. 
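// Record layout handed to the helper below; the push order mirrors the
// field order of VerifyHelperArguments (x86-64 pushes grow the stack down):
//   [rsp +  0] = r_super_klass  -> _super
//   [rsp +  8] = r_sub_klass    -> _sub
//   [rsp + 16] = linear_result  -> _linear_result
//   [rsp + 24] = result         -> _table_result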
5359 push(result); 5360 push(linear_result); 5361 push(r_sub_klass); 5362 push(r_super_klass); 5363 movptr(c_rarg1, rsp); 5364 movptr(c_rarg0, (uintptr_t) "mismatch"); 5365 call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper))); 5366 should_not_reach_here(); 5367 } 5368 bind(L_done); 5369 5370 BLOCK_COMMENT("} verify_secondary_supers_table"); 5371 } 5372 5373 #undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS 5374 5375 #endif // LP64 5376 5377 void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) { 5378 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 5379 5380 Label L_fallthrough; 5381 if (L_fast_path == nullptr) { 5382 L_fast_path = &L_fallthrough; 5383 } else if (L_slow_path == nullptr) { 5384 L_slow_path = &L_fallthrough; 5385 } 5386 5387 // Fast path check: class is fully initialized 5388 cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); 5389 jcc(Assembler::equal, *L_fast_path); 5390 5391 // Fast path check: current thread is initializer thread 5392 cmpptr(thread, Address(klass, InstanceKlass::init_thread_offset())); 5393 if (L_slow_path == &L_fallthrough) { 5394 jcc(Assembler::equal, *L_fast_path); 5395 bind(*L_slow_path); 5396 } else if (L_fast_path == &L_fallthrough) { 5397 jcc(Assembler::notEqual, *L_slow_path); 5398 bind(*L_fast_path); 5399 } else { 5400 Unimplemented(); 5401 } 5402 } 5403 5404 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) { 5405 if (VM_Version::supports_cmov()) { 5406 cmovl(cc, dst, src); 5407 } else { 5408 Label L; 5409 jccb(negate_condition(cc), L); 5410 movl(dst, src); 5411 bind(L); 5412 } 5413 } 5414 5415 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) { 5416 if (VM_Version::supports_cmov()) { 5417 cmovl(cc, dst, src); 5418 } else { 5419 Label L; 5420 jccb(negate_condition(cc), L); 5421 movl(dst, src); 5422 bind(L); 5423 } 5424 } 5425 5426 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 5427 if (!VerifyOops || VerifyAdapterSharing) { 5428 // Below address of the code string confuses VerifyAdapterSharing 5429 // because it may differ between otherwise equivalent adapters. 
5430 return; 5431 } 5432 5433 BLOCK_COMMENT("verify_oop {"); 5434 #ifdef _LP64 5435 push(rscratch1); 5436 #endif 5437 push(rax); // save rax 5438 push(reg); // pass register argument 5439 5440 // Pass register number to verify_oop_subroutine 5441 const char* b = nullptr; 5442 { 5443 ResourceMark rm; 5444 stringStream ss; 5445 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 5446 b = code_string(ss.as_string()); 5447 } 5448 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate()); 5449 pushptr(buffer.addr(), rscratch1); 5450 5451 // call indirectly to solve generation ordering problem 5452 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 5453 call(rax); 5454 // Caller pops the arguments (oop, message) and restores rax, r10 5455 BLOCK_COMMENT("} verify_oop"); 5456 } 5457 5458 void MacroAssembler::vallones(XMMRegister dst, int vector_len) { 5459 if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) { 5460 // Only pcmpeq has dependency breaking treatment (i.e the execution can begin without 5461 // waiting for the previous result on dst), not vpcmpeqd, so just use vpternlog 5462 vpternlogd(dst, 0xFF, dst, dst, vector_len); 5463 } else if (VM_Version::supports_avx()) { 5464 vpcmpeqd(dst, dst, dst, vector_len); 5465 } else { 5466 assert(VM_Version::supports_sse2(), ""); 5467 pcmpeqd(dst, dst); 5468 } 5469 } 5470 5471 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 5472 int extra_slot_offset) { 5473 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 5474 int stackElementSize = Interpreter::stackElementSize; 5475 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 5476 #ifdef ASSERT 5477 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 5478 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 5479 #endif 5480 Register scale_reg = noreg; 5481 Address::ScaleFactor scale_factor = Address::no_scale; 5482 if (arg_slot.is_constant()) { 5483 offset += arg_slot.as_constant() * stackElementSize; 5484 } else { 5485 scale_reg = arg_slot.as_register(); 5486 scale_factor = Address::times(stackElementSize); 5487 } 5488 offset += wordSize; // return PC is on stack 5489 return Address(rsp, scale_reg, scale_factor, offset); 5490 } 5491 5492 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 5493 if (!VerifyOops || VerifyAdapterSharing) { 5494 // Below address of the code string confuses VerifyAdapterSharing 5495 // because it may differ between otherwise equivalent adapters. 5496 return; 5497 } 5498 5499 #ifdef _LP64 5500 push(rscratch1); 5501 #endif 5502 push(rax); // save rax, 5503 // addr may contain rsp so we will have to adjust it based on the push 5504 // we just did (and on 64 bit we do two pushes) 5505 // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which 5506 // stores rax into addr which is backwards of what was intended. 
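// (Two words are pushed above on 64-bit -- rscratch1 and rax -- and one on
// 32-bit, so an rsp-relative address must be rebased past them; that is
// what the LP64_ONLY(2 *) BytesPerWord adjustment below accounts for.)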
5507 if (addr.uses(rsp)) { 5508 lea(rax, addr); 5509 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord)); 5510 } else { 5511 pushptr(addr); 5512 } 5513 5514 // Pass register number to verify_oop_subroutine 5515 const char* b = nullptr; 5516 { 5517 ResourceMark rm; 5518 stringStream ss; 5519 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 5520 b = code_string(ss.as_string()); 5521 } 5522 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate()); 5523 pushptr(buffer.addr(), rscratch1); 5524 5525 // call indirectly to solve generation ordering problem 5526 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 5527 call(rax); 5528 // Caller pops the arguments (addr, message) and restores rax, r10. 5529 } 5530 5531 void MacroAssembler::verify_tlab() { 5532 #ifdef ASSERT 5533 if (UseTLAB && VerifyOops) { 5534 Label next, ok; 5535 Register t1 = rsi; 5536 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread); 5537 5538 push(t1); 5539 NOT_LP64(push(thread_reg)); 5540 NOT_LP64(get_thread(thread_reg)); 5541 5542 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 5543 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); 5544 jcc(Assembler::aboveEqual, next); 5545 STOP("assert(top >= start)"); 5546 should_not_reach_here(); 5547 5548 bind(next); 5549 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); 5550 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 5551 jcc(Assembler::aboveEqual, ok); 5552 STOP("assert(top <= end)"); 5553 should_not_reach_here(); 5554 5555 bind(ok); 5556 NOT_LP64(pop(thread_reg)); 5557 pop(t1); 5558 } 5559 #endif 5560 } 5561 5562 class ControlWord { 5563 public: 5564 int32_t _value; 5565 5566 int rounding_control() const { return (_value >> 10) & 3 ; } 5567 int precision_control() const { return (_value >> 8) & 3 ; } 5568 bool precision() const { return ((_value >> 5) & 1) != 0; } 5569 bool underflow() const { return ((_value >> 4) & 1) != 0; } 5570 bool overflow() const { return ((_value >> 3) & 1) != 0; } 5571 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 5572 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 5573 bool invalid() const { return ((_value >> 0) & 1) != 0; } 5574 5575 void print() const { 5576 // rounding control 5577 const char* rc; 5578 switch (rounding_control()) { 5579 case 0: rc = "round near"; break; 5580 case 1: rc = "round down"; break; 5581 case 2: rc = "round up "; break; 5582 case 3: rc = "chop "; break; 5583 default: 5584 rc = nullptr; // silence compiler warnings 5585 fatal("Unknown rounding control: %d", rounding_control()); 5586 }; 5587 // precision control 5588 const char* pc; 5589 switch (precision_control()) { 5590 case 0: pc = "24 bits "; break; 5591 case 1: pc = "reserved"; break; 5592 case 2: pc = "53 bits "; break; 5593 case 3: pc = "64 bits "; break; 5594 default: 5595 pc = nullptr; // silence compiler warnings 5596 fatal("Unknown precision control: %d", precision_control()); 5597 }; 5598 // flags 5599 char f[9]; 5600 f[0] = ' '; 5601 f[1] = ' '; 5602 f[2] = (precision ()) ? 'P' : 'p'; 5603 f[3] = (underflow ()) ? 'U' : 'u'; 5604 f[4] = (overflow ()) ? 'O' : 'o'; 5605 f[5] = (zero_divide ()) ? 'Z' : 'z'; 5606 f[6] = (denormalized()) ? 'D' : 'd'; 5607 f[7] = (invalid ()) ? 
'I' : 'i'; 5608 f[8] = '\x0'; 5609 // output 5610 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc); 5611 } 5612 5613 }; 5614 5615 class StatusWord { 5616 public: 5617 int32_t _value; 5618 5619 bool busy() const { return ((_value >> 15) & 1) != 0; } 5620 bool C3() const { return ((_value >> 14) & 1) != 0; } 5621 bool C2() const { return ((_value >> 10) & 1) != 0; } 5622 bool C1() const { return ((_value >> 9) & 1) != 0; } 5623 bool C0() const { return ((_value >> 8) & 1) != 0; } 5624 int top() const { return (_value >> 11) & 7 ; } 5625 bool error_status() const { return ((_value >> 7) & 1) != 0; } 5626 bool stack_fault() const { return ((_value >> 6) & 1) != 0; } 5627 bool precision() const { return ((_value >> 5) & 1) != 0; } 5628 bool underflow() const { return ((_value >> 4) & 1) != 0; } 5629 bool overflow() const { return ((_value >> 3) & 1) != 0; } 5630 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 5631 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 5632 bool invalid() const { return ((_value >> 0) & 1) != 0; } 5633 5634 void print() const { 5635 // condition codes 5636 char c[5]; 5637 c[0] = (C3()) ? '3' : '-'; 5638 c[1] = (C2()) ? '2' : '-'; 5639 c[2] = (C1()) ? '1' : '-'; 5640 c[3] = (C0()) ? '0' : '-'; 5641 c[4] = '\x0'; 5642 // flags 5643 char f[9]; 5644 f[0] = (error_status()) ? 'E' : '-'; 5645 f[1] = (stack_fault ()) ? 'S' : '-'; 5646 f[2] = (precision ()) ? 'P' : '-'; 5647 f[3] = (underflow ()) ? 'U' : '-'; 5648 f[4] = (overflow ()) ? 'O' : '-'; 5649 f[5] = (zero_divide ()) ? 'Z' : '-'; 5650 f[6] = (denormalized()) ? 'D' : '-'; 5651 f[7] = (invalid ()) ? 'I' : '-'; 5652 f[8] = '\x0'; 5653 // output 5654 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top()); 5655 } 5656 5657 }; 5658 5659 class TagWord { 5660 public: 5661 int32_t _value; 5662 5663 int tag_at(int i) const { return (_value >> (i*2)) & 3; } 5664 5665 void print() const { 5666 printf("%04x", _value & 0xFFFF); 5667 } 5668 5669 }; 5670 5671 class FPU_Register { 5672 public: 5673 int32_t _m0; 5674 int32_t _m1; 5675 int16_t _ex; 5676 5677 bool is_indefinite() const { 5678 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0; 5679 } 5680 5681 void print() const { 5682 char sign = (_ex < 0) ? '-' : '+'; 5683 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " "; 5684 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind); 5685 }; 5686 5687 }; 5688 5689 class FPU_State { 5690 public: 5691 enum { 5692 register_size = 10, 5693 number_of_registers = 8, 5694 register_mask = 7 5695 }; 5696 5697 ControlWord _control_word; 5698 StatusWord _status_word; 5699 TagWord _tag_word; 5700 int32_t _error_offset; 5701 int32_t _error_selector; 5702 int32_t _data_offset; 5703 int32_t _data_selector; 5704 int8_t _register[register_size * number_of_registers]; 5705 5706 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); } 5707 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; } 5708 5709 const char* tag_as_string(int tag) const { 5710 switch (tag) { 5711 case 0: return "valid"; 5712 case 1: return "zero"; 5713 case 2: return "special"; 5714 case 3: return "empty"; 5715 } 5716 ShouldNotReachHere(); 5717 return nullptr; 5718 } 5719 5720 void print() const { 5721 // print computation registers 5722 { int t = _status_word.top(); 5723 for (int i = 0; i < number_of_registers; i++) { 5724 int j = (i - t) & register_mask; 5725 printf("%c r%d = ST%d = ", (j == 0 ? 
'*' : ' '), i, j); 5726 st(j)->print(); 5727 printf(" %s\n", tag_as_string(_tag_word.tag_at(i))); 5728 } 5729 } 5730 printf("\n"); 5731 // print control registers 5732 printf("ctrl = "); _control_word.print(); printf("\n"); 5733 printf("stat = "); _status_word .print(); printf("\n"); 5734 printf("tags = "); _tag_word .print(); printf("\n"); 5735 } 5736 5737 }; 5738 5739 class Flag_Register { 5740 public: 5741 int32_t _value; 5742 5743 bool overflow() const { return ((_value >> 11) & 1) != 0; } 5744 bool direction() const { return ((_value >> 10) & 1) != 0; } 5745 bool sign() const { return ((_value >> 7) & 1) != 0; } 5746 bool zero() const { return ((_value >> 6) & 1) != 0; } 5747 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; } 5748 bool parity() const { return ((_value >> 2) & 1) != 0; } 5749 bool carry() const { return ((_value >> 0) & 1) != 0; } 5750 5751 void print() const { 5752 // flags 5753 char f[8]; 5754 f[0] = (overflow ()) ? 'O' : '-'; 5755 f[1] = (direction ()) ? 'D' : '-'; 5756 f[2] = (sign ()) ? 'S' : '-'; 5757 f[3] = (zero ()) ? 'Z' : '-'; 5758 f[4] = (auxiliary_carry()) ? 'A' : '-'; 5759 f[5] = (parity ()) ? 'P' : '-'; 5760 f[6] = (carry ()) ? 'C' : '-'; 5761 f[7] = '\x0'; 5762 // output 5763 printf("%08x flags = %s", _value, f); 5764 } 5765 5766 }; 5767 5768 class IU_Register { 5769 public: 5770 int32_t _value; 5771 5772 void print() const { 5773 printf("%08x %11d", _value, _value); 5774 } 5775 5776 }; 5777 5778 class IU_State { 5779 public: 5780 Flag_Register _eflags; 5781 IU_Register _rdi; 5782 IU_Register _rsi; 5783 IU_Register _rbp; 5784 IU_Register _rsp; 5785 IU_Register _rbx; 5786 IU_Register _rdx; 5787 IU_Register _rcx; 5788 IU_Register _rax; 5789 5790 void print() const { 5791 // computation registers 5792 printf("rax, = "); _rax.print(); printf("\n"); 5793 printf("rbx, = "); _rbx.print(); printf("\n"); 5794 printf("rcx = "); _rcx.print(); printf("\n"); 5795 printf("rdx = "); _rdx.print(); printf("\n"); 5796 printf("rdi = "); _rdi.print(); printf("\n"); 5797 printf("rsi = "); _rsi.print(); printf("\n"); 5798 printf("rbp, = "); _rbp.print(); printf("\n"); 5799 printf("rsp = "); _rsp.print(); printf("\n"); 5800 printf("\n"); 5801 // control registers 5802 printf("flgs = "); _eflags.print(); printf("\n"); 5803 } 5804 }; 5805 5806 5807 class CPU_State { 5808 public: 5809 FPU_State _fpu_state; 5810 IU_State _iu_state; 5811 5812 void print() const { 5813 printf("--------------------------------------------------\n"); 5814 _iu_state .print(); 5815 printf("\n"); 5816 _fpu_state.print(); 5817 printf("--------------------------------------------------\n"); 5818 } 5819 5820 }; 5821 5822 5823 static void _print_CPU_state(CPU_State* state) { 5824 state->print(); 5825 }; 5826 5827 5828 void MacroAssembler::print_CPU_state() { 5829 push_CPU_state(); 5830 push(rsp); // pass CPU state 5831 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state))); 5832 addptr(rsp, wordSize); // discard argument 5833 pop_CPU_state(); 5834 } 5835 5836 5837 #ifndef _LP64 5838 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) { 5839 static int counter = 0; 5840 FPU_State* fs = &state->_fpu_state; 5841 counter++; 5842 // For leaf calls, only verify that the top few elements remain empty. 5843 // We only need 1 empty at the top for C2 code. 
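// (x87 tag values, as decoded by FPU_State::tag_as_string above:
// 0 = valid, 1 = zero, 2 = special, 3 = empty. The checks below therefore
// compare tags against 3 to test whether a stack slot is empty.)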
5844 if( stack_depth < 0 ) { 5845 if( fs->tag_for_st(7) != 3 ) { 5846 printf("FPR7 not empty\n"); 5847 state->print(); 5848 assert(false, "error"); 5849 return false; 5850 } 5851 return true; // All other stack states do not matter 5852 } 5853 5854 assert((fs->_control_word._value & 0xffff) == StubRoutines::x86::fpu_cntrl_wrd_std(), 5855 "bad FPU control word"); 5856 5857 // compute stack depth 5858 int i = 0; 5859 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++; 5860 int d = i; 5861 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++; 5862 // verify findings 5863 if (i != FPU_State::number_of_registers) { 5864 // stack not contiguous 5865 printf("%s: stack not contiguous at ST%d\n", s, i); 5866 state->print(); 5867 assert(false, "error"); 5868 return false; 5869 } 5870 // check if computed stack depth corresponds to expected stack depth 5871 if (stack_depth < 0) { 5872 // expected stack depth is -stack_depth or less 5873 if (d > -stack_depth) { 5874 // too many elements on the stack 5875 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d); 5876 state->print(); 5877 assert(false, "error"); 5878 return false; 5879 } 5880 } else { 5881 // expected stack depth is stack_depth 5882 if (d != stack_depth) { 5883 // wrong stack depth 5884 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d); 5885 state->print(); 5886 assert(false, "error"); 5887 return false; 5888 } 5889 } 5890 // everything is cool 5891 return true; 5892 } 5893 5894 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 5895 if (!VerifyFPU) return; 5896 push_CPU_state(); 5897 push(rsp); // pass CPU state 5898 ExternalAddress msg((address) s); 5899 // pass message string s 5900 pushptr(msg.addr(), noreg); 5901 push(stack_depth); // pass stack depth 5902 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU))); 5903 addptr(rsp, 3 * wordSize); // discard arguments 5904 // check for error 5905 { Label L; 5906 testl(rax, rax); 5907 jcc(Assembler::notZero, L); 5908 int3(); // break if error condition 5909 bind(L); 5910 } 5911 pop_CPU_state(); 5912 } 5913 #endif // _LP64 5914 5915 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) { 5916 // Either restore the MXCSR register after returning from the JNI Call 5917 // or verify that it wasn't changed (with -Xcheck:jni flag). 5918 if (VM_Version::supports_sse()) { 5919 if (RestoreMXCSROnJNICalls) { 5920 ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch); 5921 } else if (CheckJNICalls) { 5922 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry())); 5923 } 5924 } 5925 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty. 5926 vzeroupper(); 5927 5928 #ifndef _LP64 5929 // Either restore the x87 floating pointer control word after returning 5930 // from the JNI call or verify that it wasn't changed. 5931 if (CheckJNICalls) { 5932 call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry())); 5933 } 5934 #endif // _LP64 5935 } 5936 5937 // ((OopHandle)result).resolve(); 5938 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) { 5939 assert_different_registers(result, tmp); 5940 5941 // Only 64 bit platforms support GCs that require a tmp register 5942 // Only IN_HEAP loads require a thread_tmp register 5943 // OopHandle::resolve is an indirection like jobject. 
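// In plain C terms this is just: result = *(oop*)result. The load is routed
// through access_load_at so that collectors which need load barriers
// (e.g. ZGC) still observe the indirection.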
5944 access_load_at(T_OBJECT, IN_NATIVE, 5945 result, Address(result, 0), tmp, /*tmp_thread*/noreg); 5946 } 5947 5948 // ((WeakHandle)result).resolve(); 5949 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) { 5950 assert_different_registers(rresult, rtmp); 5951 Label resolved; 5952 5953 // A null weak handle resolves to null. 5954 cmpptr(rresult, 0); 5955 jcc(Assembler::equal, resolved); 5956 5957 // Only 64 bit platforms support GCs that require a tmp register 5958 // Only IN_HEAP loads require a thread_tmp register 5959 // WeakHandle::resolve is an indirection like jweak. 5960 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 5961 rresult, Address(rresult, 0), rtmp, /*tmp_thread*/noreg); 5962 bind(resolved); 5963 } 5964 5965 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) { 5966 // get mirror 5967 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 5968 load_method_holder(mirror, method); 5969 movptr(mirror, Address(mirror, mirror_offset)); 5970 resolve_oop_handle(mirror, tmp); 5971 } 5972 5973 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 5974 load_method_holder(rresult, rmethod); 5975 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 5976 } 5977 5978 void MacroAssembler::load_method_holder(Register holder, Register method) { 5979 movptr(holder, Address(method, Method::const_offset())); // ConstMethod* 5980 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 5981 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 5982 } 5983 5984 void MacroAssembler::load_metadata(Register dst, Register src) { 5985 if (UseCompressedClassPointers) { 5986 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5987 } else { 5988 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5989 } 5990 } 5991 5992 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { 5993 assert_different_registers(src, tmp); 5994 assert_different_registers(dst, tmp); 5995 #ifdef _LP64 5996 if (UseCompressedClassPointers) { 5997 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5998 decode_klass_not_null(dst, tmp); 5999 } else 6000 #endif 6001 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 6002 } 6003 6004 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) { 6005 load_klass(dst, src, tmp); 6006 movptr(dst, Address(dst, Klass::prototype_header_offset())); 6007 } 6008 6009 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { 6010 assert_different_registers(src, tmp); 6011 assert_different_registers(dst, tmp); 6012 #ifdef _LP64 6013 if (UseCompressedClassPointers) { 6014 encode_klass_not_null(src, tmp); 6015 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); 6016 } else 6017 #endif 6018 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src); 6019 } 6020 6021 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, 6022 Register tmp1, Register thread_tmp) { 6023 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 6024 decorators = AccessInternal::decorator_fixup(decorators, type); 6025 bool as_raw = (decorators & AS_RAW) != 0; 6026 if (as_raw) { 6027 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp); 6028 } else { 6029 bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp); 6030 } 6031 } 
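// access_load_at and access_store_at funnel heap accesses through the
// active GC's BarrierSetAssembler. AS_RAW requests a bare, barrier-free
// access, which is why that branch invokes the base-class implementation
// with a qualified (non-virtual) call. The dispatch shape, shared by the
// load path above and the store path below:
//
//   if (decorators & AS_RAW) bs->BarrierSetAssembler::load_at(...); // non-virtual: no GC barriers
//   else                     bs->load_at(...);                      // virtual: GC-specific barriers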
6032 6033 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, 6034 Register tmp1, Register tmp2, Register tmp3) { 6035 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 6036 decorators = AccessInternal::decorator_fixup(decorators, type); 6037 bool as_raw = (decorators & AS_RAW) != 0; 6038 if (as_raw) { 6039 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 6040 } else { 6041 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 6042 } 6043 } 6044 6045 void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst, 6046 Register inline_klass) { 6047 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 6048 bs->value_copy(this, decorators, src, dst, inline_klass); 6049 } 6050 6051 void MacroAssembler::first_field_offset(Register inline_klass, Register offset) { 6052 movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset())); 6053 movl(offset, Address(offset, InlineKlass::first_field_offset_offset())); 6054 } 6055 6056 void MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) { 6057 // ((address) (void*) o) + vk->first_field_offset(); 6058 Register offset = (data == oop) ? rscratch1 : data; 6059 first_field_offset(inline_klass, offset); 6060 if (data == oop) { 6061 addptr(data, offset); 6062 } else { 6063 lea(data, Address(oop, offset)); 6064 } 6065 } 6066 6067 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass, 6068 Register index, Register data) { 6069 assert(index != rcx, "index needs to shift by rcx"); 6070 assert_different_registers(array, array_klass, index); 6071 assert_different_registers(rcx, array, index); 6072 6073 // array->base() + (index << Klass::layout_helper_log2_element_size(lh)); 6074 movl(rcx, Address(array_klass, Klass::layout_helper_offset())); 6075 6076 // Klass::layout_helper_log2_element_size(lh) 6077 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask; 6078 shrl(rcx, Klass::_lh_log2_element_size_shift); 6079 andl(rcx, Klass::_lh_log2_element_size_mask); 6080 shlptr(index); // index << rcx 6081 6082 lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT))); 6083 } 6084 6085 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 6086 Register thread_tmp, DecoratorSet decorators) { 6087 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp); 6088 } 6089 6090 // Doesn't do verification, generates fixed size code 6091 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 6092 Register thread_tmp, DecoratorSet decorators) { 6093 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp); 6094 } 6095 6096 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 6097 Register tmp2, Register tmp3, DecoratorSet decorators) { 6098 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 6099 } 6100 6101 // Used for storing nulls. 
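// (Passing noreg as the value register is how the barrier assembler is
// asked to emit a null store, so no register needs to hold zero.)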
6102 void MacroAssembler::store_heap_oop_null(Address dst) { 6103 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 6104 } 6105 6106 #ifdef _LP64 6107 void MacroAssembler::store_klass_gap(Register dst, Register src) { 6108 if (UseCompressedClassPointers) { 6109 // Store to klass gap in destination 6110 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); 6111 } 6112 } 6113 6114 #ifdef ASSERT 6115 void MacroAssembler::verify_heapbase(const char* msg) { 6116 assert (UseCompressedOops, "should be compressed"); 6117 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6118 if (CheckCompressedOops) { 6119 Label ok; 6120 ExternalAddress src2(CompressedOops::ptrs_base_addr()); 6121 const bool is_src2_reachable = reachable(src2); 6122 if (!is_src2_reachable) { 6123 push(rscratch1); // cmpptr trashes rscratch1 6124 } 6125 cmpptr(r12_heapbase, src2, rscratch1); 6126 jcc(Assembler::equal, ok); 6127 STOP(msg); 6128 bind(ok); 6129 if (!is_src2_reachable) { 6130 pop(rscratch1); 6131 } 6132 } 6133 } 6134 #endif 6135 6136 // Algorithm must match oop.inline.hpp encode_heap_oop. 6137 void MacroAssembler::encode_heap_oop(Register r) { 6138 #ifdef ASSERT 6139 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 6140 #endif 6141 verify_oop_msg(r, "broken oop in encode_heap_oop"); 6142 if (CompressedOops::base() == nullptr) { 6143 if (CompressedOops::shift() != 0) { 6144 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6145 shrq(r, LogMinObjAlignmentInBytes); 6146 } 6147 return; 6148 } 6149 testq(r, r); 6150 cmovq(Assembler::equal, r, r12_heapbase); 6151 subq(r, r12_heapbase); 6152 shrq(r, LogMinObjAlignmentInBytes); 6153 } 6154 6155 void MacroAssembler::encode_heap_oop_not_null(Register r) { 6156 #ifdef ASSERT 6157 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 6158 if (CheckCompressedOops) { 6159 Label ok; 6160 testq(r, r); 6161 jcc(Assembler::notEqual, ok); 6162 STOP("null oop passed to encode_heap_oop_not_null"); 6163 bind(ok); 6164 } 6165 #endif 6166 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 6167 if (CompressedOops::base() != nullptr) { 6168 subq(r, r12_heapbase); 6169 } 6170 if (CompressedOops::shift() != 0) { 6171 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6172 shrq(r, LogMinObjAlignmentInBytes); 6173 } 6174 } 6175 6176 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 6177 #ifdef ASSERT 6178 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 6179 if (CheckCompressedOops) { 6180 Label ok; 6181 testq(src, src); 6182 jcc(Assembler::notEqual, ok); 6183 STOP("null oop passed to encode_heap_oop_not_null2"); 6184 bind(ok); 6185 } 6186 #endif 6187 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 6188 if (dst != src) { 6189 movq(dst, src); 6190 } 6191 if (CompressedOops::base() != nullptr) { 6192 subq(dst, r12_heapbase); 6193 } 6194 if (CompressedOops::shift() != 0) { 6195 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6196 shrq(dst, LogMinObjAlignmentInBytes); 6197 } 6198 } 6199 6200 void MacroAssembler::decode_heap_oop(Register r) { 6201 #ifdef ASSERT 6202 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 6203 #endif 6204 if (CompressedOops::base() == nullptr) { 6205 if (CompressedOops::shift() != 0) { 6206 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg 
wrong"); 6207 shlq(r, LogMinObjAlignmentInBytes); 6208 } 6209 } else { 6210 Label done; 6211 shlq(r, LogMinObjAlignmentInBytes); 6212 jccb(Assembler::equal, done); 6213 addq(r, r12_heapbase); 6214 bind(done); 6215 } 6216 verify_oop_msg(r, "broken oop in decode_heap_oop"); 6217 } 6218 6219 void MacroAssembler::decode_heap_oop_not_null(Register r) { 6220 // Note: it will change flags 6221 assert (UseCompressedOops, "should only be used for compressed headers"); 6222 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6223 // Cannot assert, unverified entry point counts instructions (see .ad file) 6224 // vtableStubs also counts instructions in pd_code_size_limit. 6225 // Also do not verify_oop as this is called by verify_oop. 6226 if (CompressedOops::shift() != 0) { 6227 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6228 shlq(r, LogMinObjAlignmentInBytes); 6229 if (CompressedOops::base() != nullptr) { 6230 addq(r, r12_heapbase); 6231 } 6232 } else { 6233 assert (CompressedOops::base() == nullptr, "sanity"); 6234 } 6235 } 6236 6237 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 6238 // Note: it will change flags 6239 assert (UseCompressedOops, "should only be used for compressed headers"); 6240 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6241 // Cannot assert, unverified entry point counts instructions (see .ad file) 6242 // vtableStubs also counts instructions in pd_code_size_limit. 6243 // Also do not verify_oop as this is called by verify_oop. 6244 if (CompressedOops::shift() != 0) { 6245 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6246 if (LogMinObjAlignmentInBytes == Address::times_8) { 6247 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); 6248 } else { 6249 if (dst != src) { 6250 movq(dst, src); 6251 } 6252 shlq(dst, LogMinObjAlignmentInBytes); 6253 if (CompressedOops::base() != nullptr) { 6254 addq(dst, r12_heapbase); 6255 } 6256 } 6257 } else { 6258 assert (CompressedOops::base() == nullptr, "sanity"); 6259 if (dst != src) { 6260 movq(dst, src); 6261 } 6262 } 6263 } 6264 6265 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { 6266 assert_different_registers(r, tmp); 6267 if (CompressedKlassPointers::base() != nullptr) { 6268 mov64(tmp, (int64_t)CompressedKlassPointers::base()); 6269 subq(r, tmp); 6270 } 6271 if (CompressedKlassPointers::shift() != 0) { 6272 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 6273 shrq(r, LogKlassAlignmentInBytes); 6274 } 6275 } 6276 6277 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) { 6278 assert_different_registers(src, dst); 6279 if (CompressedKlassPointers::base() != nullptr) { 6280 mov64(dst, -(int64_t)CompressedKlassPointers::base()); 6281 addq(dst, src); 6282 } else { 6283 movptr(dst, src); 6284 } 6285 if (CompressedKlassPointers::shift() != 0) { 6286 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 6287 shrq(dst, LogKlassAlignmentInBytes); 6288 } 6289 } 6290 6291 void MacroAssembler::decode_klass_not_null(Register r, Register tmp) { 6292 assert_different_registers(r, tmp); 6293 // Note: it will change flags 6294 assert(UseCompressedClassPointers, "should only be used for compressed headers"); 6295 // Cannot assert, unverified entry point counts instructions (see .ad file) 6296 // vtableStubs also counts instructions in pd_code_size_limit. 
6297 // Also do not verify_oop as this is called by verify_oop. 6298 if (CompressedKlassPointers::shift() != 0) { 6299 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 6300 shlq(r, LogKlassAlignmentInBytes); 6301 } 6302 if (CompressedKlassPointers::base() != nullptr) { 6303 mov64(tmp, (int64_t)CompressedKlassPointers::base()); 6304 addq(r, tmp); 6305 } 6306 } 6307 6308 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) { 6309 assert_different_registers(src, dst); 6310 // Note: it will change flags 6311 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6312 // Cannot assert, unverified entry point counts instructions (see .ad file) 6313 // vtableStubs also counts instructions in pd_code_size_limit. 6314 // Also do not verify_oop as this is called by verify_oop. 6315 6316 if (CompressedKlassPointers::base() == nullptr && 6317 CompressedKlassPointers::shift() == 0) { 6318 // The best case scenario is that there is no base or shift. Then it is already 6319 // a pointer that needs nothing but a register rename. 6320 movl(dst, src); 6321 } else { 6322 if (CompressedKlassPointers::base() != nullptr) { 6323 mov64(dst, (int64_t)CompressedKlassPointers::base()); 6324 } else { 6325 xorq(dst, dst); 6326 } 6327 if (CompressedKlassPointers::shift() != 0) { 6328 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 6329 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); 6330 leaq(dst, Address(dst, src, Address::times_8, 0)); 6331 } else { 6332 addq(dst, src); 6333 } 6334 } 6335 } 6336 6337 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 6338 assert (UseCompressedOops, "should only be used for compressed headers"); 6339 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6340 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6341 int oop_index = oop_recorder()->find_index(obj); 6342 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6343 mov_narrow_oop(dst, oop_index, rspec); 6344 } 6345 6346 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) { 6347 assert (UseCompressedOops, "should only be used for compressed headers"); 6348 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6349 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6350 int oop_index = oop_recorder()->find_index(obj); 6351 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6352 mov_narrow_oop(dst, oop_index, rspec); 6353 } 6354 6355 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 6356 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6357 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6358 int klass_index = oop_recorder()->find_index(k); 6359 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6360 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6361 } 6362 6363 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) { 6364 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6365 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6366 int klass_index = oop_recorder()->find_index(k); 6367 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6368 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6369 } 6370 6371 void 
MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) { 6372 assert (UseCompressedOops, "should only be used for compressed headers"); 6373 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6374 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6375 int oop_index = oop_recorder()->find_index(obj); 6376 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6377 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 6378 } 6379 6380 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) { 6381 assert (UseCompressedOops, "should only be used for compressed headers"); 6382 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6383 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6384 int oop_index = oop_recorder()->find_index(obj); 6385 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6386 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 6387 } 6388 6389 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) { 6390 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6391 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6392 int klass_index = oop_recorder()->find_index(k); 6393 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6394 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6395 } 6396 6397 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { 6398 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6399 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6400 int klass_index = oop_recorder()->find_index(k); 6401 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6402 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6403 } 6404 6405 void MacroAssembler::reinit_heapbase() { 6406 if (UseCompressedOops) { 6407 if (Universe::heap() != nullptr) { 6408 if (CompressedOops::base() == nullptr) { 6409 MacroAssembler::xorptr(r12_heapbase, r12_heapbase); 6410 } else { 6411 mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base()); 6412 } 6413 } else { 6414 movptr(r12_heapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 6415 } 6416 } 6417 } 6418 6419 #endif // _LP64 6420 6421 #if COMPILER2_OR_JVMCI 6422 6423 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers 6424 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) { 6425 // cnt - number of qwords (8-byte words). 6426 // base - start address, qword aligned. 
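// Rough shape of the fill loop below, for orientation only (the AVX-512
// paths replace the 32-byte chunk and the scalar tail with masked vector
// stores):
//
//   while (cnt >= 8) { store 64 bytes of val; base += 64; cnt -= 8; }
//   if (cnt >= 4)    { store 32 bytes;        base += 32; cnt -= 4; }
//   while (cnt > 0)  { store one qword;       base += 8;  cnt -= 1; }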
  Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
  bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
  if (use64byteVector) {
    evpbroadcastq(xtmp, val, AVX_512bit);
  } else if (MaxVectorSize >= 32) {
    movdq(xtmp, val);
    punpcklqdq(xtmp, xtmp);
    vinserti128_high(xtmp, xtmp);
  } else {
    movdq(xtmp, val);
    punpcklqdq(xtmp, xtmp);
  }
  jmp(L_zero_64_bytes);

  BIND(L_loop);
  if (MaxVectorSize >= 32) {
    fill64(base, 0, xtmp, use64byteVector);
  } else {
    movdqu(Address(base,  0), xtmp);
    movdqu(Address(base, 16), xtmp);
    movdqu(Address(base, 32), xtmp);
    movdqu(Address(base, 48), xtmp);
  }
  addptr(base, 64);

  BIND(L_zero_64_bytes);
  subptr(cnt, 8);
  jccb(Assembler::greaterEqual, L_loop);

  // Copy trailing 64 bytes
  if (use64byteVector) {
    addptr(cnt, 8);
    jccb(Assembler::equal, L_end);
    fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
    jmp(L_end);
  } else {
    addptr(cnt, 4);
    jccb(Assembler::less, L_tail);
    if (MaxVectorSize >= 32) {
      vmovdqu(Address(base, 0), xtmp);
    } else {
      movdqu(Address(base,  0), xtmp);
      movdqu(Address(base, 16), xtmp);
    }
  }
  addptr(base, 32);
  subptr(cnt, 4);

  BIND(L_tail);
  addptr(cnt, 4);
  jccb(Assembler::lessEqual, L_end);
  if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
    fill32_masked(3, base, 0, xtmp, mask, cnt, val);
  } else {
    decrement(cnt);

    BIND(L_sloop);
    movq(Address(base, 0), xtmp);
    addptr(base, 8);
    decrement(cnt);
    jccb(Assembler::greaterEqual, L_sloop);
  }
  BIND(L_end);
}

int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
  assert(InlineTypeReturnedAsFields, "inline types must be returned as fields here");
  // An inline type might be returned. If fields are in registers we
  // need to allocate an inline type instance and initialize it with
  // the value of the fields.
  Label skip;
  // A new buffered inline type is only needed if one was not already returned
  // (rax is tagged with 1 when the fields are returned in registers).
  testptr(rax, 1);
  jcc(Assembler::zero, skip);
  int call_offset = -1;

#ifdef _LP64
  // The following code is similar to allocate_instance but has some slight differences,
  // e.g. the object size is never zero and is sometimes a compile-time constant, and
  // storing the klass pointer after allocation is unnecessary when vk != nullptr;
  // allocate_instance is not aware of these.
  Label slow_case;
  // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
  mov(rscratch1, rax); // save rax for slow_case since *_allocate may clobber it when allocation fails
  if (vk != nullptr) {
    // Called from C1, where the return type is statically known.
    movptr(rbx, (intptr_t)vk->get_InlineKlass());
    jint lh = vk->layout_helper();
    assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
    if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
      tlab_allocate(r15_thread, rax, noreg, lh, r13, r14, slow_case);
    } else {
      jmp(slow_case);
    }
  } else {
    // Called from the interpreter.
    // RAX contains ((the InlineKlass* of the return type) | 0x01)
    mov(rbx, rax);
    andptr(rbx, -2);
    if (UseTLAB) {
      movl(r14, Address(rbx, Klass::layout_helper_offset()));
      testl(r14, Klass::_lh_instance_slow_path_bit);
      jcc(Assembler::notZero, slow_case);
      tlab_allocate(r15_thread, rax, r14, 0, r13, r14, slow_case);
    } else {
      jmp(slow_case);
    }
  }
  if (UseTLAB) {
    // 2. Initialize buffered inline instance header
    Register buffer_obj = rax;
    movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
    xorl(r13, r13);
    store_klass_gap(buffer_obj, r13);
    if (vk == nullptr) {
      // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
      mov(r13, rbx);
    }
    store_klass(buffer_obj, rbx, rscratch1);
    // 3. Initialize its fields with an inline class specific handler
    if (vk != nullptr) {
      call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
    } else {
      movptr(rbx, Address(r13, InstanceKlass::adr_inlineklass_fixed_block_offset()));
      movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
      call(rbx);
    }
    jmp(skip);
  }
  bind(slow_case);
  // We failed to allocate a new inline type, fall back to a runtime
  // call. Some oop field may be live in some registers but we can't
  // tell. That runtime call will take care of preserving them
  // across a GC if there's one.
  mov(rax, rscratch1);
#endif

  if (from_interpreter) {
    super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
  } else {
    call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
    call_offset = offset();
  }

  bind(skip);
  return call_offset;
}

// Move a value between registers/stack slots and update the reg_state
bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
  assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
  if (reg_state[to->value()] == reg_written) {
    return true; // Already written
  }
  if (from != to && bt != T_VOID) {
    if (reg_state[to->value()] == reg_readonly) {
      return false; // Not yet writable
    }
    if (from->is_reg()) {
      if (to->is_reg()) {
        if (from->is_XMMRegister()) {
          if (bt == T_DOUBLE) {
            movdbl(to->as_XMMRegister(), from->as_XMMRegister());
          } else {
            assert(bt == T_FLOAT, "must be float");
            movflt(to->as_XMMRegister(), from->as_XMMRegister());
          }
        } else {
          movq(to->as_Register(), from->as_Register());
        }
      } else {
        int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        Address to_addr = Address(rsp, st_off);
        if (from->is_XMMRegister()) {
          if (bt == T_DOUBLE) {
            movdbl(to_addr, from->as_XMMRegister());
          } else {
            assert(bt == T_FLOAT, "must be float");
            movflt(to_addr, from->as_XMMRegister());
          }
        } else {
          movq(to_addr, from->as_Register());
        }
      }
    } else {
      Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
      if (to->is_reg()) {
        if (to->is_XMMRegister()) {
          if (bt == T_DOUBLE) {
            movdbl(to->as_XMMRegister(), from_addr);
          } else {
            assert(bt == T_FLOAT, "must be float");
            movflt(to->as_XMMRegister(), from_addr);
          }
        } else {
          movq(to->as_Register(), from_addr);
        }
      } else {
        int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        movq(r13, from_addr);
        movq(Address(rsp, st_off), r13);
      }
    }
  }
  // Update register states
  reg_state[from->value()] = reg_writable;
  reg_state[to->value()] = reg_written;
  return true;
}

// Calculate the extra stack space required for packing or unpacking inline
// args and adjust the stack pointer
int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
  // Two additional slots to account for return address
  int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
  sp_inc = align_up(sp_inc, StackAlignmentInBytes);
  // Save the return address, adjust the stack (make sure it is properly
  // 16-byte aligned) and copy the return address to the new top of the stack.
  // The stack will be repaired on return (see MacroAssembler::remove_frame).
  assert(sp_inc > 0, "sanity");
  pop(r13);
  subptr(rsp, sp_inc);
  push(r13);
  return sp_inc;
}

// Read all fields from an inline type buffer and store the field values in registers/stack slots.
bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
                                          VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
                                          RegState reg_state[]) {
  assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
  assert(from->is_valid(), "source must be valid");
  bool progress = false;
#ifdef ASSERT
  const int start_offset = offset();
#endif

  Label L_null, L_notNull;
  // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
  Register tmp1 = r10;
  Register tmp2 = r13;
  Register fromReg = noreg;
  ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
  bool done = true;
  bool mark_done = true;
  VMReg toReg;
  BasicType bt;
  // Check if argument requires a null check
  bool null_check = false;
  VMReg nullCheckReg;
  while (stream.next(nullCheckReg, bt)) {
    if (sig->at(stream.sig_index())._offset == -1) {
      null_check = true;
      break;
    }
  }
  stream.reset(sig_index, to_index);
  while (stream.next(toReg, bt)) {
    assert(toReg->is_valid(), "destination must be valid");
    int idx = (int)toReg->value();
    if (reg_state[idx] == reg_readonly) {
      if (idx != from->value()) {
        mark_done = false;
      }
      done = false;
      continue;
    } else if (reg_state[idx] == reg_written) {
      continue;
    }
    assert(reg_state[idx] == reg_writable, "must be writable");
    reg_state[idx] = reg_written;
    progress = true;

    if (fromReg == noreg) {
      if (from->is_reg()) {
        fromReg = from->as_Register();
      } else {
        int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        movq(tmp1, Address(rsp, st_off));
        fromReg = tmp1;
      }
      if (null_check) {
        // Nullable inline type argument, emit null check
        testptr(fromReg, fromReg);
        jcc(Assembler::zero, L_null);
      }
    }
    int off = sig->at(stream.sig_index())._offset;
    if (off == -1) {
      assert(null_check, "Missing null check");
      if (toReg->is_stack()) {
        int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        movq(Address(rsp, st_off), 1);
      } else {
        movq(toReg->as_Register(), 1);
      }
      continue;
    }
    assert(off > 0, "offset in object should be positive");
    Address fromAddr = Address(fromReg, off);
    if (!toReg->is_XMMRegister()) {
      Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
      if (is_reference_type(bt)) {
        load_heap_oop(dst, fromAddr);
      } else {
        bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
        load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
      }
      if (toReg->is_stack()) {
        int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        movq(Address(rsp, st_off), dst);
      }
    } else if (bt == T_DOUBLE) {
      movdbl(toReg->as_XMMRegister(), fromAddr);
    } else {
      assert(bt == T_FLOAT, "must be float");
      movflt(toReg->as_XMMRegister(), fromAddr);
    }
  }
  if (progress && null_check) {
    if (done) {
      jmp(L_notNull);
      bind(L_null);
      // Set IsInit field to zero to signal that the argument is null.
      // Also set all oop fields to zero to make the GC happy.
      stream.reset(sig_index, to_index);
      while (stream.next(toReg, bt)) {
        if (sig->at(stream.sig_index())._offset == -1 ||
            bt == T_OBJECT || bt == T_ARRAY) {
          if (toReg->is_stack()) {
            int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
            movq(Address(rsp, st_off), 0);
          } else {
            xorq(toReg->as_Register(), toReg->as_Register());
          }
        }
      }
      bind(L_notNull);
    } else {
      bind(L_null);
    }
  }

  sig_index = stream.sig_index();
  to_index = stream.regs_index();

  if (mark_done && reg_state[from->value()] != reg_written) {
    // This is okay because no one else will write to that slot
    reg_state[from->value()] = reg_writable;
  }
  from_index--;
  assert(progress || (start_offset == offset()), "should not emit code");
  return done;
}

bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
                                        VMRegPair* from, int from_count, int& from_index, VMReg to,
                                        RegState reg_state[], Register val_array) {
  assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
  assert(to->is_valid(), "destination must be valid");

  if (reg_state[to->value()] == reg_written) {
    skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
    return true; // Already written
  }

  // TODO 8284443 Isn't it an issue if below code uses r14 as tmp when it contains a spilled value?
  // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
  Register val_obj_tmp = r11;
  Register from_reg_tmp = r14;
  Register tmp1 = r10;
  Register tmp2 = r13;
  Register tmp3 = rbx;
  Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();

  assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);

  if (reg_state[to->value()] == reg_readonly) {
    if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
      skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
      return false; // Not yet writable
    }
    val_obj = val_obj_tmp;
  }

  int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
  load_heap_oop(val_obj, Address(val_array, index));

  ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
  VMReg fromReg;
  BasicType bt;
  Label L_null;
  while (stream.next(fromReg, bt)) {
    assert(fromReg->is_valid(), "source must be valid");
    reg_state[fromReg->value()] = reg_writable;

    int off = sig->at(stream.sig_index())._offset;
    if (off == -1) {
      // Nullable inline type argument, emit null check
      Label L_notNull;
      if (fromReg->is_stack()) {
        int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        testb(Address(rsp, ld_off), 1);
      } else {
        testb(fromReg->as_Register(), 1);
      }
      jcc(Assembler::notZero, L_notNull);
      movptr(val_obj, 0);
      jmp(L_null);
      bind(L_notNull);
      continue;
    }

    assert(off > 0, "offset in object should be positive");
    size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;

    Address dst(val_obj, off);
    if (!fromReg->is_XMMRegister()) {
      Register src;
      if (fromReg->is_stack()) {
        src = from_reg_tmp;
        int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
      } else {
        src = fromReg->as_Register();
      }
      assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
      if (is_reference_type(bt)) {
        store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
      } else {
        store_sized_value(dst, src, size_in_bytes);
      }
    } else if (bt == T_DOUBLE) {
      movdbl(dst, fromReg->as_XMMRegister());
    } else {
      assert(bt == T_FLOAT, "must be float");
      movflt(dst, fromReg->as_XMMRegister());
    }
  }
  bind(L_null);
  sig_index = stream.sig_index();
  from_index = stream.regs_index();

  assert(reg_state[to->value()] == reg_writable, "must have already been read");
  bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
  assert(success, "to register must be writeable");
  return true;
}

VMReg MacroAssembler::spill_reg_for(VMReg reg) {
  return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
}

void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
  assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  if (needs_stack_repair) {
    movq(rbp, Address(rsp, initial_framesize));
    // The stack increment resides just below the saved rbp
    addq(rsp, Address(rsp, initial_framesize - wordSize));
  } else {
    if (initial_framesize > 0) {
      addq(rsp, initial_framesize);
    }
    pop(rbp);
  }
}

// Clearing constant sized memory using YMM/ZMM registers.
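// Note on the dispatch below (informational): because 'cnt' is a compile-time
// constant here, large chunks are emitted as unrolled (or counted) 64-byte
// fills and the remaining 1..7 qwords are handled by a switch, preferring a
// single opmask-predicated store where one suffices. For example, a 7-qword
// tail on a 64-byte-vector machine becomes one store under mask 0x7F.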
void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
  assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
  bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);

  int vector64_count = (cnt & (~0x7)) >> 3;
  cnt = cnt & 0x7;
  const int fill64_per_loop = 4;
  const int max_unrolled_fill64 = 8;

  // 64 byte initialization loop.
  vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
  int start64 = 0;
  if (vector64_count > max_unrolled_fill64) {
    Label LOOP;
    Register index = rtmp;

    start64 = vector64_count - (vector64_count % fill64_per_loop);

    movl(index, 0);
    BIND(LOOP);
    for (int i = 0; i < fill64_per_loop; i++) {
      fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector);
    }
    addl(index, fill64_per_loop * 64);
    cmpl(index, start64 * 64);
    jccb(Assembler::less, LOOP);
  }
  for (int i = start64; i < vector64_count; i++) {
    fill64(base, i * 64, xtmp, use64byteVector);
  }

  // Clear remaining 64 byte tail.
  int disp = vector64_count * 64;
  if (cnt) {
    switch (cnt) {
      case 1:
        movq(Address(base, disp), xtmp);
        break;
      case 2:
        evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit);
        break;
      case 3:
        movl(rtmp, 0x7);
        kmovwl(mask, rtmp);
        evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit);
        break;
      case 4:
        evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
        break;
      case 5:
        if (use64byteVector) {
          movl(rtmp, 0x1F);
          kmovwl(mask, rtmp);
          evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
        } else {
          evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
          movq(Address(base, disp + 32), xtmp);
        }
        break;
      case 6:
        if (use64byteVector) {
          movl(rtmp, 0x3F);
          kmovwl(mask, rtmp);
          evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
        } else {
          evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
          evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit);
        }
        break;
      case 7:
        if (use64byteVector) {
          movl(rtmp, 0x7F);
          kmovwl(mask, rtmp);
          evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
        } else {
          evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
          movl(rtmp, 0x7);
          kmovwl(mask, rtmp);
          evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
        }
        break;
      default:
        fatal("Unexpected length : %d\n", cnt);
        break;
    }
  }
}

void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
                               bool is_large, bool word_copy_only, KRegister mask) {
  // cnt      - number of qwords (8-byte words).
  // base     - start address, qword aligned.
  // is_large - if optimizers know cnt is larger than InitArrayShortSize
  assert(base==rdi, "base register must be edi for rep stos");
  assert(val==rax, "val register must be eax for rep stos");
  assert(cnt==rcx, "cnt register must be ecx for rep stos");
  assert(InitArrayShortSize % BytesPerLong == 0,
         "InitArrayShortSize should be the multiple of BytesPerLong");

  Label DONE;

  if (!is_large) {
    Label LOOP, LONG;
    cmpptr(cnt, InitArrayShortSize/BytesPerLong);
    jccb(Assembler::greater, LONG);

    NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM

    decrement(cnt);
    jccb(Assembler::negative, DONE); // Zero length

    // Use individual pointer-sized stores for small counts:
    BIND(LOOP);
    movptr(Address(base, cnt, Address::times_ptr), val);
    decrement(cnt);
    jccb(Assembler::greaterEqual, LOOP);
    jmpb(DONE);

    BIND(LONG);
  }

  // Use longer rep-prefixed ops for non-small counts:
  if (UseFastStosb && !word_copy_only) {
    shlptr(cnt, 3); // convert to number of bytes
    rep_stosb();
  } else if (UseXMMForObjInit) {
    xmm_clear_mem(base, cnt, val, xtmp, mask);
  } else {
    NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
    rep_stos();
  }

  BIND(DONE);
}

#endif //COMPILER2_OR_JVMCI


void MacroAssembler::generate_fill(BasicType t, bool aligned,
                                   Register to, Register value, Register count,
                                   Register rtmp, XMMRegister xtmp) {
  ShortBranchVerifier sbv(this);
  assert_different_registers(to, value, count, rtmp);
  Label L_exit;
  Label L_fill_2_bytes, L_fill_4_bytes;

#if defined(COMPILER2) && defined(_LP64)
  if (MaxVectorSize >= 32 &&
      VM_Version::supports_avx512vlbw() &&
      VM_Version::supports_bmi2()) {
    generate_fill_avx3(t, to, value, count, rtmp, xtmp);
    return;
  }
#endif

  int shift = -1;
  switch (t) {
    case T_BYTE:
      shift = 2;
      break;
    case T_SHORT:
      shift = 1;
      break;
    case T_INT:
      shift = 0;
      break;
    default: ShouldNotReachHere();
  }

  if (t == T_BYTE) {
    andl(value, 0xff);
    movl(rtmp, value);
    shll(rtmp, 8);
    orl(value, rtmp);
  }
  if (t == T_SHORT) {
    andl(value, 0xffff);
  }
  if (t == T_BYTE || t == T_SHORT) {
    movl(rtmp, value);
    shll(rtmp, 16);
    orl(value, rtmp);
  }

  cmpptr(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
  jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
  if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
    Label L_skip_align2;
    // align source address at 4 bytes address boundary
    if (t == T_BYTE) {
      Label L_skip_align1;
      // One byte misalignment happens only for byte arrays
      testptr(to, 1);
      jccb(Assembler::zero, L_skip_align1);
      movb(Address(to, 0), value);
      increment(to);
      decrement(count);
      BIND(L_skip_align1);
    }
    // Two bytes misalignment happens only for byte and short (char) arrays
    testptr(to, 2);
    jccb(Assembler::zero, L_skip_align2);
    movw(Address(to, 0), value);
    addptr(to, 2);
    subptr(count, 1<<(shift-1));
    BIND(L_skip_align2);
  }
  if (UseSSE < 2) {
    Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
    // Fill 32-byte chunks
    subptr(count, 8 << shift);
    jcc(Assembler::less, L_check_fill_8_bytes);
    align(16);

    BIND(L_fill_32_bytes_loop);

    for (int i = 0; i < 32; i += 4) {
      movl(Address(to, i), value);
    }

    addptr(to, 32);
    subptr(count, 8 << shift);
    jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
    BIND(L_check_fill_8_bytes);
    addptr(count, 8 << shift);
    jccb(Assembler::zero, L_exit);
    jmpb(L_fill_8_bytes);

    //
    // length is too short, just fill qwords
    //
    BIND(L_fill_8_bytes_loop);
    movl(Address(to, 0), value);
    movl(Address(to, 4), value);
    addptr(to, 8);
    BIND(L_fill_8_bytes);
    subptr(count, 1 << (shift + 1));
    jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
    // fall through to fill 4 bytes
  } else {
    Label L_fill_32_bytes;
    if (!UseUnalignedLoadStores) {
      // align to 8 bytes, we know we are 4 byte aligned to start
      testptr(to, 4);
      jccb(Assembler::zero, L_fill_32_bytes);
      movl(Address(to, 0), value);
      addptr(to, 4);
      subptr(count, 1<<shift);
    }
    BIND(L_fill_32_bytes);
    {
      assert(UseSSE >= 2, "supported cpu only");
      Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
      movdl(xtmp, value);
      if (UseAVX >= 2 && UseUnalignedLoadStores) {
        Label L_check_fill_32_bytes;
        if (UseAVX > 2) {
          // Fill 64-byte chunks
          Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;

          // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2
          cmpptr(count, VM_Version::avx3_threshold());
          jccb(Assembler::below, L_check_fill_64_bytes_avx2);

          vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);

          subptr(count, 16 << shift);
          jccb(Assembler::less, L_check_fill_32_bytes);
          align(16);

          BIND(L_fill_64_bytes_loop_avx3);
          evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
          addptr(to, 64);
          subptr(count, 16 << shift);
          jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
          jmpb(L_check_fill_32_bytes);

          BIND(L_check_fill_64_bytes_avx2);
        }
        // Fill 64-byte chunks
        Label L_fill_64_bytes_loop;
        vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);

        subptr(count, 16 << shift);
        jcc(Assembler::less, L_check_fill_32_bytes);
        align(16);

        BIND(L_fill_64_bytes_loop);
        vmovdqu(Address(to, 0), xtmp);
        vmovdqu(Address(to, 32), xtmp);
        addptr(to, 64);
        subptr(count, 16 << shift);
        jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);

        BIND(L_check_fill_32_bytes);
        addptr(count, 8 << shift);
        jccb(Assembler::less, L_check_fill_8_bytes);
        vmovdqu(Address(to, 0), xtmp);
        addptr(to, 32);
        subptr(count, 8 << shift);

        BIND(L_check_fill_8_bytes);
        // clean upper bits of YMM registers
        movdl(xtmp, value);
        pshufd(xtmp, xtmp, 0);
      } else {
        // Fill 32-byte chunks
        pshufd(xtmp, xtmp, 0);

        subptr(count, 8 << shift);
        jcc(Assembler::less, L_check_fill_8_bytes);
        align(16);

        BIND(L_fill_32_bytes_loop);

        if (UseUnalignedLoadStores) {
          movdqu(Address(to, 0), xtmp);
          movdqu(Address(to, 16), xtmp);
        } else {
          movq(Address(to, 0), xtmp);
          movq(Address(to, 8), xtmp);
          movq(Address(to, 16), xtmp);
          movq(Address(to, 24), xtmp);
        }

        addptr(to, 32);
        subptr(count, 8 << shift);
        jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);

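        // Informational: the chunk loop above exits with count short by one
        // 32-byte step (8 << shift elements); the addptr after the label
        // below restores the true remainder before the qword tail fill.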
        BIND(L_check_fill_8_bytes);
      }
      addptr(count, 8 << shift);
      jccb(Assembler::zero, L_exit);
      jmpb(L_fill_8_bytes);

      //
      // length is too short, just fill qwords
      //
      BIND(L_fill_8_bytes_loop);
      movq(Address(to, 0), xtmp);
      addptr(to, 8);
      BIND(L_fill_8_bytes);
      subptr(count, 1 << (shift + 1));
      jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
    }
  }
  // fill trailing 4 bytes
  BIND(L_fill_4_bytes);
  testl(count, 1<<shift);
  jccb(Assembler::zero, L_fill_2_bytes);
  movl(Address(to, 0), value);
  if (t == T_BYTE || t == T_SHORT) {
    Label L_fill_byte;
    addptr(to, 4);
    BIND(L_fill_2_bytes);
    // fill trailing 2 bytes
    testl(count, 1<<(shift-1));
    jccb(Assembler::zero, L_fill_byte);
    movw(Address(to, 0), value);
    if (t == T_BYTE) {
      addptr(to, 2);
      BIND(L_fill_byte);
      // fill trailing byte
      testl(count, 1);
      jccb(Assembler::zero, L_exit);
      movb(Address(to, 0), value);
    } else {
      BIND(L_fill_byte);
    }
  } else {
    BIND(L_fill_2_bytes);
  }
  BIND(L_exit);
}

void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) {
  switch (type) {
    case T_BYTE:
    case T_BOOLEAN:
      evpbroadcastb(dst, src, vector_len);
      break;
    case T_SHORT:
    case T_CHAR:
      evpbroadcastw(dst, src, vector_len);
      break;
    case T_INT:
    case T_FLOAT:
      evpbroadcastd(dst, src, vector_len);
      break;
    case T_LONG:
    case T_DOUBLE:
      evpbroadcastq(dst, src, vector_len);
      break;
    default:
      fatal("Unhandled type : %s", type2name(type));
      break;
  }
}

// encode char[] to byte[] in ISO_8859_1 or ASCII
//@IntrinsicCandidate
//private static int implEncodeISOArray(byte[] sa, int sp,
//                                      byte[] da, int dp, int len) {
//  int i = 0;
//  for (; i < len; i++) {
//    char c = StringUTF16.getChar(sa, sp++);
//    if (c > '\u00FF')
//      break;
//    da[dp++] = (byte)c;
//  }
//  return i;
//}
//
//@IntrinsicCandidate
//private static int implEncodeAsciiArray(char[] sa, int sp,
//                                        byte[] da, int dp, int len) {
//  int i = 0;
//  for (; i < len; i++) {
//    char c = sa[sp++];
//    if (c >= '\u0080')
//      break;
//    da[dp++] = (byte)c;
//  }
//  return i;
//}
void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
                                      XMMRegister tmp1Reg, XMMRegister tmp2Reg,
                                      XMMRegister tmp3Reg, XMMRegister tmp4Reg,
                                      Register tmp5, Register result, bool ascii) {

  // rsi: src
  // rdi: dst
  // rdx: len
  // rcx: tmp5
  // rax: result
  ShortBranchVerifier sbv(this);
  assert_different_registers(src, dst, len, tmp5, result);
  Label L_done, L_copy_1_char, L_copy_1_char_exit;

  int mask = ascii ? 0xff80ff80 : 0xff00ff00;
  int short_mask = ascii ? 0xff80 : 0xff00;

  // set result
  xorl(result, result);
  // check for zero length
  testl(len, len);
  jcc(Assembler::zero, L_done);

  movl(result, len);

  // Setup pointers
  lea(src, Address(src, len, Address::times_2)); // char[]
  lea(dst, Address(dst, len, Address::times_1)); // byte[]
  negptr(len);

  if (UseSSE42Intrinsics || UseAVX >= 2) {
    Label L_copy_8_chars, L_copy_8_chars_exit;
    Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;

    if (UseAVX >= 2) {
      Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
      movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector
      movdl(tmp1Reg, tmp5);
      vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit);
      jmp(L_chars_32_check);

      bind(L_copy_32_chars);
      vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
      vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
      vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
      vptest(tmp2Reg, tmp1Reg);       // check for Unicode or non-ASCII chars in vector
      jccb(Assembler::notZero, L_copy_32_chars_exit);
      vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
      vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
      vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);

      bind(L_chars_32_check);
      addptr(len, 32);
      jcc(Assembler::lessEqual, L_copy_32_chars);

      bind(L_copy_32_chars_exit);
      subptr(len, 16);
      jccb(Assembler::greater, L_copy_16_chars_exit);

    } else if (UseSSE42Intrinsics) {
      movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector
      movdl(tmp1Reg, tmp5);
      pshufd(tmp1Reg, tmp1Reg, 0);
      jmpb(L_chars_16_check);
    }

    bind(L_copy_16_chars);
    if (UseAVX >= 2) {
      vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
      vptest(tmp2Reg, tmp1Reg);
      jcc(Assembler::notZero, L_copy_16_chars_exit);
      vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
      vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
    } else {
      if (UseAVX > 0) {
        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
        vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
      } else {
        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
        por(tmp2Reg, tmp3Reg);
        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
        por(tmp2Reg, tmp4Reg);
      }
      ptest(tmp2Reg, tmp1Reg);       // check for Unicode or non-ASCII chars in vector
      jccb(Assembler::notZero, L_copy_16_chars_exit);
      packuswb(tmp3Reg, tmp4Reg);
    }
    movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);

    bind(L_chars_16_check);
    addptr(len, 16);
    jcc(Assembler::lessEqual, L_copy_16_chars);

    bind(L_copy_16_chars_exit);
    if (UseAVX >= 2) {
      // clean upper bits of YMM registers
      vpxor(tmp2Reg, tmp2Reg);
      vpxor(tmp3Reg, tmp3Reg);
      vpxor(tmp4Reg, tmp4Reg);
      movdl(tmp1Reg, tmp5);
      pshufd(tmp1Reg, tmp1Reg, 0);
    }
    subptr(len, 8);
    jccb(Assembler::greater, L_copy_8_chars_exit);

    bind(L_copy_8_chars);
    movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
    ptest(tmp3Reg, tmp1Reg);
    jccb(Assembler::notZero, L_copy_8_chars_exit);
    packuswb(tmp3Reg, tmp1Reg);
    movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
    addptr(len, 8);
    jccb(Assembler::lessEqual, L_copy_8_chars);

    bind(L_copy_8_chars_exit);
    subptr(len, 8);
    jccb(Assembler::zero, L_done);
  }

  bind(L_copy_1_char);
  load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
  testl(tmp5, short_mask);      // check if Unicode or non-ASCII char
  jccb(Assembler::notZero, L_copy_1_char_exit);
  movb(Address(dst, len, Address::times_1, 0), tmp5);
  addptr(len, 1);
  jccb(Assembler::less, L_copy_1_char);

  bind(L_copy_1_char_exit);
  addptr(result, len); // len is the negative count of unprocessed elements

  bind(L_done);
}

#ifdef _LP64
/**
 * Helper for multiply_to_len().
 */
void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
  addq(dest_lo, src1);
  adcq(dest_hi, 0);
  addq(dest_lo, src2);
  adcq(dest_hi, 0);
}

/**
 * Multiply 64 bit by 64 bit first loop.
 */
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                                           Register y, Register y_idx, Register z,
                                           Register carry, Register product,
                                           Register idx, Register kdx) {
  //
  //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    huge_128 product = y[idx] * x[xstart] + carry;
  //    z[kdx] = (jlong)product;
  //    carry  = (jlong)(product >>> 64);
  //  }
  //  z[xstart] = carry;
  //

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  decrementl(xstart);
  jcc(Assembler::negative, L_one_x);

  movq(x_xstart, Address(x, xstart, Address::times_4, 0));
  rorq(x_xstart, 32); // convert big-endian to little-endian

  bind(L_first_loop);
  decrementl(idx);
  jcc(Assembler::negative, L_first_loop_exit);
  decrementl(idx);
  jcc(Assembler::negative, L_one_y);
  movq(y_idx, Address(y, idx, Address::times_4, 0));
  rorq(y_idx, 32); // convert big-endian to little-endian
  bind(L_multiply);
  movq(product, x_xstart);
  mulq(y_idx); // product(rax) * y_idx -> rdx:rax
  addq(product, carry);
  adcq(rdx, 0);
  subl(kdx, 2);
  movl(Address(z, kdx, Address::times_4, 4), product);
  shrq(product, 32);
  movl(Address(z, kdx, Address::times_4, 0), product);
  movq(carry, rdx);
  jmp(L_first_loop);

  bind(L_one_y);
  movl(y_idx, Address(y, 0));
  jmp(L_multiply);

  bind(L_one_x);
  movl(x_xstart, Address(x, 0));
  jmp(L_first_loop);

  bind(L_first_loop_exit);
}

/**
 * Multiply 64 bit by 64 bit and add 128 bit.
 */
void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                                            Register yz_idx, Register idx,
                                            Register carry, Register product, int offset) {
  //     huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
  //     z[kdx] = (jlong)product;

  movq(yz_idx, Address(y, idx, Address::times_4, offset));
  rorq(yz_idx, 32); // convert big-endian to little-endian
  movq(product, x_xstart);
  mulq(yz_idx);     // product(rax) * yz_idx -> rdx:product(rax)
  movq(yz_idx, Address(z, idx, Address::times_4, offset));
  rorq(yz_idx, 32); // convert big-endian to little-endian

  add2_with_carry(rdx, product, carry, yz_idx);

  movl(Address(z, idx, Address::times_4, offset+4), product);
  shrq(product, 32);
  movl(Address(z, idx, Address::times_4, offset), product);
}

/**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
 */
void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                                             Register yz_idx, Register idx, Register jdx,
                                             Register carry, Register product,
                                             Register carry2) {
  //   jlong carry, x[], y[], z[];
  //   int kdx = ystart+1;
  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //     huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
  //     z[kdx+idx+1] = (jlong)product;
  //     jlong carry2  = (jlong)(product >>> 64);
  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
  //     z[kdx+idx] = (jlong)product;
  //     carry  = (jlong)(product >>> 64);
  //   }
  //   idx += 2;
  //   if (idx > 0) {
  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
  //     z[kdx+idx] = (jlong)product;
  //     carry  = (jlong)(product >>> 64);
  //   }
  //

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  movl(jdx, idx);
  andl(jdx, 0xFFFFFFFC);
  shrl(jdx, 2);

  bind(L_third_loop);
  subl(jdx, 1);
  jcc(Assembler::negative, L_third_loop_exit);
  subl(idx, 4);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
  movq(carry2, rdx);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
  movq(carry, rdx);
  jmp(L_third_loop);

  bind(L_third_loop_exit);

  andl(idx, 0x3);
  jcc(Assembler::zero, L_post_third_loop_done);

  Label L_check_1;
  subl(idx, 2);
  jcc(Assembler::negative, L_check_1);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
  movq(carry, rdx);

  bind(L_check_1);
  addl(idx, 0x2);
  andl(idx, 0x1);
  subl(idx, 1);
  jcc(Assembler::negative, L_post_third_loop_done);

  movl(yz_idx, Address(y, idx, Address::times_4, 0));
  movq(product, x_xstart);
  mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
  movl(yz_idx, Address(z, idx, Address::times_4, 0));

  add2_with_carry(rdx, product, yz_idx, carry);

  movl(Address(z, idx, Address::times_4, 0), product);
  shrq(product, 32);

  shlq(rdx, 32);
  orq(product, rdx);
  movq(carry, product);

  bind(L_post_third_loop_done);
}

/**
 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
 *
 */
void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
                                                  Register carry, Register carry2,
                                                  Register idx, Register jdx,
                                                  Register yz_idx1, Register yz_idx2,
                                                  Register tmp, Register tmp3, Register tmp4) {
  assert(UseBMI2Instructions, "should be used only when BMI2 is available");

  //   jlong carry, x[], y[], z[];
  //   int kdx = ystart+1;
  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //     huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
  //     jlong carry2  = (jlong)(tmp3 >>> 64);
  //     huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2;
  //     carry  = (jlong)(tmp4 >>> 64);
  //     z[kdx+idx+1] = (jlong)tmp3;
  //     z[kdx+idx] = (jlong)tmp4;
  //   }
  //   idx += 2;
  //   if (idx > 0) {
  //     yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
  //     z[kdx+idx] = (jlong)yz_idx1;
  //     carry  = (jlong)(yz_idx1 >>> 64);
  //   }
  //

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  movl(jdx, idx);
  andl(jdx, 0xFFFFFFFC);
  shrl(jdx, 2);

  bind(L_third_loop);
  subl(jdx, 1);
  jcc(Assembler::negative, L_third_loop_exit);
  subl(idx, 4);

  movq(yz_idx1, Address(y, idx, Address::times_4, 8));
  rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
  movq(yz_idx2, Address(y, idx, Address::times_4, 0));
  rorxq(yz_idx2, yz_idx2, 32);

  mulxq(tmp4, tmp3, yz_idx1);  //  yz_idx1 * rdx -> tmp4:tmp3
  mulxq(carry2, tmp, yz_idx2); //  yz_idx2 * rdx -> carry2:tmp

  movq(yz_idx1, Address(z, idx, Address::times_4, 8));
  rorxq(yz_idx1, yz_idx1, 32);
  movq(yz_idx2, Address(z, idx, Address::times_4, 0));
  rorxq(yz_idx2, yz_idx2, 32);

  if (VM_Version::supports_adx()) {
    adcxq(tmp3, carry);
    adoxq(tmp3, yz_idx1);

    adcxq(tmp4, tmp);
    adoxq(tmp4, yz_idx2);

    movl(carry, 0); // does not affect flags
    adcxq(carry2, carry);
    adoxq(carry2, carry);
  } else {
    add2_with_carry(tmp4, tmp3, carry, yz_idx1);
    add2_with_carry(carry2, tmp4, tmp, yz_idx2);
  }
  movq(carry, carry2);

  movl(Address(z, idx, Address::times_4, 12), tmp3);
  shrq(tmp3, 32);
  movl(Address(z, idx, Address::times_4, 8), tmp3);

  movl(Address(z, idx, Address::times_4, 4), tmp4);
  shrq(tmp4, 32);
  movl(Address(z, idx, Address::times_4, 0), tmp4);

  jmp(L_third_loop);

  bind(L_third_loop_exit);

  andl(idx, 0x3);
  jcc(Assembler::zero, L_post_third_loop_done);

  Label L_check_1;
  subl(idx, 2);
  jcc(Assembler::negative, L_check_1);

  movq(yz_idx1, Address(y, idx, Address::times_4, 0));
  rorxq(yz_idx1, yz_idx1, 32);
  mulxq(tmp4, tmp3, yz_idx1); //  yz_idx1 * rdx -> tmp4:tmp3
  movq(yz_idx2, Address(z, idx, Address::times_4, 0));
  rorxq(yz_idx2, yz_idx2, 32);

  add2_with_carry(tmp4, tmp3, carry, yz_idx2);

  movl(Address(z, idx, Address::times_4, 4), tmp3);
  shrq(tmp3, 32);
  movl(Address(z, idx, Address::times_4, 0), tmp3);
  movq(carry, tmp4);

  bind(L_check_1);
  addl(idx, 0x2);
  andl(idx, 0x1);
  subl(idx, 1);
  jcc(Assembler::negative, L_post_third_loop_done);
  movl(tmp4, Address(y, idx, Address::times_4, 0));
  mulxq(carry2, tmp3, tmp4); //  tmp4 * rdx -> carry2:tmp3
  movl(tmp4, Address(z, idx, Address::times_4, 0));

  add2_with_carry(carry2, tmp3, tmp4, carry);

  movl(Address(z, idx, Address::times_4, 0), tmp3);
  shrq(tmp3, 32);

  shlq(carry2, 32);
  orq(tmp3, carry2);
  movq(carry, tmp3);

  bind(L_post_third_loop_done);
}

/**
 * Code for BigInteger::multiplyToLen() intrinsic.
 *
 * rdi: x
 * rax: xlen
 * rsi: y
 * rcx: ylen
 * r8:  z
 * r11: tmp0
 * r12: tmp1
 * r13: tmp2
 * r14: tmp3
 * r15: tmp4
 * rbx: tmp5
 *
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
  ShortBranchVerifier sbv(this);
  assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);

  push(tmp0);
  push(tmp1);
  push(tmp2);
  push(tmp3);
  push(tmp4);
  push(tmp5);

  push(xlen);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product  = xlen;
  const Register x_xstart = tmp0;

  // First Loop.
  //
  //  final static long LONG_MASK = 0xffffffffL;
  //  int xstart = xlen - 1;
  //  int ystart = ylen - 1;
  //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //    z[kdx] = (int)product;
  //    carry = product >>> 32;
  //  }
  //  z[xstart] = (int)carry;
  //

  movl(idx, ylen);               // idx = ylen;
  lea(kdx, Address(xlen, ylen)); // kdx = xlen+ylen;
  xorq(carry, carry);            // carry = 0;

  Label L_done;

  movl(xstart, xlen);
  decrementl(xstart);
  jcc(Assembler::negative, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  Label L_second_loop;
  testl(kdx, kdx);
  jcc(Assembler::zero, L_second_loop);

  Label L_carry;
  subl(kdx, 1);
  jcc(Assembler::zero, L_carry);

  movl(Address(z, kdx, Address::times_4, 0), carry);
  shrq(carry, 32);
  subl(kdx, 1);

  bind(L_carry);
  movl(Address(z, kdx, Address::times_4, 0), carry);

  // Second and third (nested) loops.
  //
  // for (int i = xstart-1; i >= 0; i--) { // Second loop
  //   carry = 0;
  //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
  //                    (z[k] & LONG_MASK) + carry;
  //     z[k] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[i] = (int)carry;
  // }
  //
  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx

  const Register jdx = tmp1;

  bind(L_second_loop);
  xorl(carry, carry);    // carry = 0;
  movl(jdx, ylen);       // j = ystart+1

  subl(xstart, 1);       // i = xstart-1;
  jcc(Assembler::negative, L_done);

  push(z);

  Label L_last_x;
  lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
  subl(xstart, 1);       // i = xstart-1;
  jcc(Assembler::negative, L_last_x);

  if (UseBMI2Instructions) {
    movq(rdx, Address(x, xstart, Address::times_4, 0));
    rorxq(rdx, rdx, 32); // convert big-endian to little-endian
  } else {
    movq(x_xstart, Address(x, xstart, Address::times_4, 0));
    rorq(x_xstart, 32);  // convert big-endian to little-endian
  }

  Label L_third_loop_prologue;
  bind(L_third_loop_prologue);

  push(x);
  push(xstart);
  push(ylen);


  if (UseBMI2Instructions) {
    multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
  } else { // !UseBMI2Instructions
    multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
  }

  pop(ylen);
  pop(xlen);
  pop(x);
  pop(z);

  movl(tmp3, xlen);
  addl(tmp3, 1);
  movl(Address(z, tmp3, Address::times_4, 0), carry);
  subl(tmp3, 1);
  jccb(Assembler::negative, L_done);

  shrq(carry, 32);
  movl(Address(z, tmp3, Address::times_4, 0), carry);
  jmp(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);
  if (UseBMI2Instructions) {
    movl(rdx, Address(x, 0));
  } else {
    movl(x_xstart, Address(x, 0));
  }
  jmp(L_third_loop_prologue);

  bind(L_done);

  pop(xlen);

  pop(tmp5);
  pop(tmp4);
  pop(tmp3);
  pop(tmp2);
  pop(tmp1);
  pop(tmp0);
}

void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                                         Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2) {
  assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
  Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
  Label VECTOR8_TAIL, VECTOR4_TAIL;
  Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
  Label SAME_TILL_END, DONE;
  Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL;

  // scale is in rcx in both Win64 and Unix
  ShortBranchVerifier sbv(this);

  shlq(length);
  xorq(result, result);

  if ((AVX3Threshold == 0) && (UseAVX > 2) &&
      VM_Version::supports_avx512vlbw()) {
    Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;

    cmpq(length, 64);
    jcc(Assembler::less, VECTOR32_TAIL);

    movq(tmp1, length);
    andq(tmp1, 0x3F);      // tail count
    andq(length, ~(0x3F)); // vector count

    bind(VECTOR64_LOOP);
    // AVX512 code to compare 64 byte vectors.
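    // Flag note (informational): kortestql sets CF only when the OR of its
    // two mask operands is all ones, i.e. every one of the 64 byte pairs
    // compared equal. 'aboveEqual' (carry clear) below therefore fires when
    // at least one byte differed.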
    evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit);
    evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
    kortestql(k7, k7);
    jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch
    addq(result, 64);
    subq(length, 64);
    jccb(Assembler::notZero, VECTOR64_LOOP);

    //bind(VECTOR64_TAIL);
    testq(tmp1, tmp1);
    jcc(Assembler::zero, SAME_TILL_END);

    //bind(VECTOR64_TAIL);
    // AVX512 code to compare up to 63 byte vectors.
    mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
    shlxq(tmp2, tmp2, tmp1);
    notq(tmp2);
    kmovql(k3, tmp2);

    evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit);
    evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit);

    ktestql(k7, k3);
    jcc(Assembler::below, SAME_TILL_END); // not mismatch

    bind(VECTOR64_NOT_EQUAL);
    kmovql(tmp1, k7);
    notq(tmp1);
    tzcntq(tmp1, tmp1);
    addq(result, tmp1);
    shrq(result);
    jmp(DONE);
    bind(VECTOR32_TAIL);
  }

  cmpq(length, 8);
  jcc(Assembler::equal, VECTOR8_LOOP);
  jcc(Assembler::less, VECTOR4_TAIL);

  if (UseAVX >= 2) {
    Label VECTOR16_TAIL, VECTOR32_LOOP;

    cmpq(length, 16);
    jcc(Assembler::equal, VECTOR16_LOOP);
    jcc(Assembler::less, VECTOR8_LOOP);

    cmpq(length, 32);
    jccb(Assembler::less, VECTOR16_TAIL);

    subq(length, 32);
    bind(VECTOR32_LOOP);
    vmovdqu(rymm0, Address(obja, result));
    vmovdqu(rymm1, Address(objb, result));
    vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit);
    vptest(rymm2, rymm2);
    jcc(Assembler::notZero, VECTOR32_NOT_EQUAL); // mismatch found
    addq(result, 32);
    subq(length, 32);
    jcc(Assembler::greaterEqual, VECTOR32_LOOP);
    addq(length, 32);
    jcc(Assembler::equal, SAME_TILL_END);
    // falling through if less than 32 bytes left // close the branch here.
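    // At this point 0 < length < 32: one 16-byte compare is attempted below,
    // then control falls into the 8/4/1-byte tail checks shared with the
    // SSE4.2-only path.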

    bind(VECTOR16_TAIL);
    cmpq(length, 16);
    jccb(Assembler::less, VECTOR8_TAIL);
    bind(VECTOR16_LOOP);
    movdqu(rymm0, Address(obja, result));
    movdqu(rymm1, Address(objb, result));
    vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit);
    ptest(rymm2, rymm2);
    jcc(Assembler::notZero, VECTOR16_NOT_EQUAL); // mismatch found
    addq(result, 16);
    subq(length, 16);
    jcc(Assembler::equal, SAME_TILL_END);
    // falling through if less than 16 bytes left
  } else { // regular intrinsics

    cmpq(length, 16);
    jccb(Assembler::less, VECTOR8_TAIL);

    subq(length, 16);
    bind(VECTOR16_LOOP);
    movdqu(rymm0, Address(obja, result));
    movdqu(rymm1, Address(objb, result));
    pxor(rymm0, rymm1);
    ptest(rymm0, rymm0);
    jcc(Assembler::notZero, VECTOR16_NOT_EQUAL); // mismatch found
    addq(result, 16);
    subq(length, 16);
    jccb(Assembler::greaterEqual, VECTOR16_LOOP);
    addq(length, 16);
    jcc(Assembler::equal, SAME_TILL_END);
    // falling through if less than 16 bytes left
  }

  bind(VECTOR8_TAIL);
  cmpq(length, 8);
  jccb(Assembler::less, VECTOR4_TAIL);
  bind(VECTOR8_LOOP);
  movq(tmp1, Address(obja, result));
  movq(tmp2, Address(objb, result));
  xorq(tmp1, tmp2);
  testq(tmp1, tmp1);
  jcc(Assembler::notZero, VECTOR8_NOT_EQUAL); // mismatch found
  addq(result, 8);
  subq(length, 8);
  jcc(Assembler::equal, SAME_TILL_END);
  // falling through if less than 8 bytes left

  bind(VECTOR4_TAIL);
  cmpq(length, 4);
  jccb(Assembler::less, BYTES_TAIL);
  bind(VECTOR4_LOOP);
  movl(tmp1, Address(obja, result));
  xorl(tmp1, Address(objb, result));
  testl(tmp1, tmp1);
  jcc(Assembler::notZero, VECTOR4_NOT_EQUAL); // mismatch found
  addq(result, 4);
  subq(length, 4);
  jcc(Assembler::equal, SAME_TILL_END);
  // falling through if less than 4 bytes left

  bind(BYTES_TAIL);
  bind(BYTES_LOOP);
  load_unsigned_byte(tmp1, Address(obja, result));
  load_unsigned_byte(tmp2, Address(objb, result));
  xorl(tmp1, tmp2);
  testl(tmp1, tmp1);
  jcc(Assembler::notZero, BYTES_NOT_EQUAL); // mismatch found
  decq(length);
  jcc(Assembler::zero, SAME_TILL_END);
  incq(result);
  load_unsigned_byte(tmp1, Address(obja, result));
  load_unsigned_byte(tmp2, Address(objb, result));
  xorl(tmp1, tmp2);
  testl(tmp1, tmp1);
  jcc(Assembler::notZero, BYTES_NOT_EQUAL); // mismatch found
  decq(length);
  jcc(Assembler::zero, SAME_TILL_END);
  incq(result);
  load_unsigned_byte(tmp1, Address(obja, result));
  load_unsigned_byte(tmp2, Address(objb, result));
  xorl(tmp1, tmp2);
  testl(tmp1, tmp1);
  jcc(Assembler::notZero, BYTES_NOT_EQUAL); // mismatch found
  jmp(SAME_TILL_END);

  if (UseAVX >= 2) {
    bind(VECTOR32_NOT_EQUAL);
    vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit);
    vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit);
    vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit);
    vpmovmskb(tmp1, rymm0);
    bsfq(tmp1, tmp1);
    addq(result, tmp1);
    shrq(result);
    jmp(DONE);
  }

  bind(VECTOR16_NOT_EQUAL);
  if (UseAVX >= 2) {
    vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit);
    vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit);
    pxor(rymm0, rymm2);
  } else {
    pcmpeqb(rymm2, rymm2);
    pxor(rymm0, rymm1);
    pcmpeqb(rymm0, rymm1);
    pxor(rymm0, rymm2);
  }
  pmovmskb(tmp1, rymm0);
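  // pmovmskb left one bit per byte in tmp1 (set where the bytes differ);
  // bsfq below finds the lowest set bit, i.e. the offset of the first
  // mismatching byte within the 16-byte chunk.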
  bsfq(tmp1, tmp1);
  addq(result, tmp1);
  shrq(result);
  jmpb(DONE);

  bind(VECTOR8_NOT_EQUAL);
  bind(VECTOR4_NOT_EQUAL);
  bsfq(tmp1, tmp1);
  shrq(tmp1, 3);
  addq(result, tmp1);
  bind(BYTES_NOT_EQUAL);
  shrq(result);
  jmpb(DONE);

  bind(SAME_TILL_END);
  mov64(result, -1);

  bind(DONE);
}

// Helper functions for square_to_len()

/**
 * Store the squares of x[], right shifted one bit (divided by 2) into z[]
 * Preserves x and z and modifies rest of the registers.
 */
void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
  // Perform square and right shift by 1
  // Handle odd xlen case first, then for even xlen do the following
  // jlong carry = 0;
  // for (int j=0, i=0; j < xlen; j+=2, i+=4) {
  //   huge_128 product = x[j:j+1] * x[j:j+1];
  //   z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
  //   z[i+2:i+3] = (jlong)(product >>> 1);
  //   carry = (jlong)product;
  // }

  xorq(tmp5, tmp5);     // carry
  xorq(rdxReg, rdxReg);
  xorl(tmp1, tmp1);     // index for x
  xorl(tmp4, tmp4);     // index for z

  Label L_first_loop, L_first_loop_exit;

  testl(xlen, 1);
  jccb(Assembler::zero, L_first_loop); // jump if xlen is even

  // Square and right shift by 1 the odd element using 32 bit multiply
  movl(raxReg, Address(x, tmp1, Address::times_4, 0));
  imulq(raxReg, raxReg);
  shrq(raxReg, 1);
  adcq(tmp5, 0);
  movq(Address(z, tmp4, Address::times_4, 0), raxReg);
  incrementl(tmp1);
  addl(tmp4, 2);

  // Square and right shift by 1 the rest using 64 bit multiply
  bind(L_first_loop);
  cmpptr(tmp1, xlen);
  jccb(Assembler::equal, L_first_loop_exit);

  // Square
  movq(raxReg, Address(x, tmp1, Address::times_4, 0));
  rorq(raxReg, 32);    // convert big-endian to little-endian
  mulq(raxReg);        // 64-bit multiply rax * rax -> rdx:rax

  // Right shift by 1 and save carry
  shrq(tmp5, 1);       // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
  rcrq(rdxReg, 1);
  rcrq(raxReg, 1);
  adcq(tmp5, 0);

  // Store result in z
  movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
  movq(Address(z, tmp4, Address::times_4, 8), raxReg);

  // Update indices for x and z
  addl(tmp1, 2);
  addl(tmp4, 4);
  jmp(L_first_loop);

  bind(L_first_loop_exit);
}


/**
 * Perform the following multiply add operation using BMI2 instructions
 * carry:sum = sum + op1*op2 + carry
 * op2 should be in rdx
 * op2 is preserved, all other registers are modified
 */
void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
  // assert op2 is rdx
  mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1
  addq(sum, carry);
  adcq(tmp2, 0);
  addq(sum, op1);
  adcq(tmp2, 0);
  movq(carry, tmp2);
}

/**
 * Perform the following multiply add operation:
 * carry:sum = sum + op1*op2 + carry
 * Preserves op1, op2 and modifies rest of registers
 */
void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
  // rdx:rax = op1 * op2
  movq(raxReg, op2);
  mulq(op1);

  // rdx:rax = sum + carry + rdx:rax
  addq(sum, carry);
  adcq(rdxReg, 0);
  addq(sum, raxReg);
  adcq(rdxReg, 0);

  // carry:sum = rdx:sum
  movq(carry, rdxReg);
}

/**
 * Add 64 bit long carry into z[] with carry propagation.
 * Preserves z and carry register values and modifies rest of registers.
 *
 */
void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
  Label L_fourth_loop, L_fourth_loop_exit;

  movl(tmp1, 1);
  subl(zlen, 2);
  addq(Address(z, zlen, Address::times_4, 0), carry);

  bind(L_fourth_loop);
  jccb(Assembler::carryClear, L_fourth_loop_exit);
  subl(zlen, 2);
  jccb(Assembler::negative, L_fourth_loop_exit);
  addq(Address(z, zlen, Address::times_4, 0), tmp1);
  jmp(L_fourth_loop);
  bind(L_fourth_loop_exit);
}

/**
 * Shift z[] left by 1 bit.
 * Preserves x, len, z and zlen registers and modifies rest of the registers.
 *
 */
void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {

  Label L_fifth_loop, L_fifth_loop_exit;

  // Fifth loop
  // Perform primitiveLeftShift(z, zlen, 1)

  const Register prev_carry = tmp1;
  const Register new_carry = tmp4;
  const Register value = tmp2;
  const Register zidx = tmp3;

  // int zidx, carry;
  // long value;
  // carry = 0;
  // for (zidx = zlen-2; zidx >=0; zidx -= 2) {
  //   (carry:value)  = (z[i] << 1) | carry ;
  //   z[i] = value;
  // }

  movl(zidx, zlen);
  xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register

  bind(L_fifth_loop);
  decl(zidx);  // Use decl to preserve carry flag
  decl(zidx);
  jccb(Assembler::negative, L_fifth_loop_exit);

  if (UseBMI2Instructions) {
    movq(value, Address(z, zidx, Address::times_4, 0));
    rclq(value, 1);
    rorxq(value, value, 32);
    movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form
  }
  else {
    // clear new_carry
    xorl(new_carry, new_carry);

    // Shift z[i] by 1, or in previous carry and save new carry
    movq(value, Address(z, zidx, Address::times_4, 0));
    shlq(value, 1);
    adcl(new_carry, 0);

    orq(value, prev_carry);
    rorq(value, 0x20);
    movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form

    // Set previous carry = new carry
    movl(prev_carry, new_carry);
  }
  jmp(L_fifth_loop);

  bind(L_fifth_loop_exit);
}


/**
 * Code for BigInteger::squareToLen() intrinsic
 *
 * rdi: x
 * rsi: len
 * r8:  z
 * rcx: zlen
 * r12: tmp1
 * r13: tmp2
 * r14: tmp3
 * r15: tmp4
 * rbx: tmp5
 *
 */
void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {

  Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply;
  push(tmp1);
  push(tmp2);
  push(tmp3);
  push(tmp4);
  push(tmp5);

  // First loop
  // Store the squares, right shifted one bit (i.e., divided by 2).
  square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);

  // Add in off-diagonal sums.
  //
  // Second, third (nested) and fourth loops.
8345 // zlen +=2; 8346 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) { 8347 // carry = 0; 8348 // long op2 = x[xidx:xidx+1]; 8349 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) { 8350 // k -= 2; 8351 // long op1 = x[j:j+1]; 8352 // long sum = z[k:k+1]; 8353 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs); 8354 // z[k:k+1] = sum; 8355 // } 8356 // add_one_64(z, k, carry, tmp_regs); 8357 // } 8358 8359 const Register carry = tmp5; 8360 const Register sum = tmp3; 8361 const Register op1 = tmp4; 8362 Register op2 = tmp2; 8363 8364 push(zlen); 8365 push(len); 8366 addl(zlen,2); 8367 bind(L_second_loop); 8368 xorq(carry, carry); 8369 subl(zlen, 4); 8370 subl(len, 2); 8371 push(zlen); 8372 push(len); 8373 cmpl(len, 0); 8374 jccb(Assembler::lessEqual, L_second_loop_exit); 8375 8376 // Multiply an array by one 64 bit long. 8377 if (UseBMI2Instructions) { 8378 op2 = rdxReg; 8379 movq(op2, Address(x, len, Address::times_4, 0)); 8380 rorxq(op2, op2, 32); 8381 } 8382 else { 8383 movq(op2, Address(x, len, Address::times_4, 0)); 8384 rorq(op2, 32); 8385 } 8386 8387 bind(L_third_loop); 8388 decrementl(len); 8389 jccb(Assembler::negative, L_third_loop_exit); 8390 decrementl(len); 8391 jccb(Assembler::negative, L_last_x); 8392 8393 movq(op1, Address(x, len, Address::times_4, 0)); 8394 rorq(op1, 32); 8395 8396 bind(L_multiply); 8397 subl(zlen, 2); 8398 movq(sum, Address(z, zlen, Address::times_4, 0)); 8399 8400 // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry. 8401 if (UseBMI2Instructions) { 8402 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2); 8403 } 8404 else { 8405 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8406 } 8407 8408 movq(Address(z, zlen, Address::times_4, 0), sum); 8409 8410 jmp(L_third_loop); 8411 bind(L_third_loop_exit); 8412 8413 // Fourth loop 8414 // Add 64 bit long carry into z with carry propagation. 8415 // Uses offsetted zlen. 8416 add_one_64(z, zlen, carry, tmp1); 8417 8418 pop(len); 8419 pop(zlen); 8420 jmp(L_second_loop); 8421 8422 // Next infrequent code is moved outside loops. 8423 bind(L_last_x); 8424 movl(op1, Address(x, 0)); 8425 jmp(L_multiply); 8426 8427 bind(L_second_loop_exit); 8428 pop(len); 8429 pop(zlen); 8430 pop(len); 8431 pop(zlen); 8432 8433 // Fifth loop 8434 // Shift z left 1 bit. 8435 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4); 8436 8437 // z[zlen-1] |= x[len-1] & 1; 8438 movl(tmp3, Address(x, len, Address::times_4, -4)); 8439 andl(tmp3, 1); 8440 orl(Address(z, zlen, Address::times_4, -4), tmp3); 8441 8442 pop(tmp5); 8443 pop(tmp4); 8444 pop(tmp3); 8445 pop(tmp2); 8446 pop(tmp1); 8447 } 8448 8449 /** 8450 * Helper function for mul_add() 8451 * Multiply the in[] by int k and add to out[] starting at offset offs using 8452 * 128 bit by 32 bit multiply and return the carry in tmp5. 8453 * Only quad int aligned length of in[] is operated on in this function. 8454 * k is in rdxReg for BMI2Instructions, for others it is in tmp2. 8455 * This function preserves out, in and k registers. 8456 * len and offset point to the appropriate index in "in" & "out" correspondingly 8457 * tmp5 has the carry. 8458 * other registers are temporary and are modified. 
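 *
 * A scalar reference for one 64-bit step of the unrolled loop -- an
 * illustrative sketch only, assuming a compiler with __int128 support and
 * with the 32-bit halves of op1/sum already swapped into natural order
 * (the rorq(..., 32) done by the code):
 *
 *   unsigned __int128 acc = (unsigned __int128)op1 * k + sum + carry;
 *   sum   = (uint64_t)acc;         // written back to out[]
 *   carry = (uint64_t)(acc >> 64); // fits in 32 bits, since k is 32-bit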
8459 * 8460 */ 8461 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in, 8462 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3, 8463 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 8464 8465 Label L_first_loop, L_first_loop_exit; 8466 8467 movl(tmp1, len); 8468 shrl(tmp1, 2); 8469 8470 bind(L_first_loop); 8471 subl(tmp1, 1); 8472 jccb(Assembler::negative, L_first_loop_exit); 8473 8474 subl(len, 4); 8475 subl(offset, 4); 8476 8477 Register op2 = tmp2; 8478 const Register sum = tmp3; 8479 const Register op1 = tmp4; 8480 const Register carry = tmp5; 8481 8482 if (UseBMI2Instructions) { 8483 op2 = rdxReg; 8484 } 8485 8486 movq(op1, Address(in, len, Address::times_4, 8)); 8487 rorq(op1, 32); 8488 movq(sum, Address(out, offset, Address::times_4, 8)); 8489 rorq(sum, 32); 8490 if (UseBMI2Instructions) { 8491 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 8492 } 8493 else { 8494 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8495 } 8496 // Store back in big endian from little endian 8497 rorq(sum, 0x20); 8498 movq(Address(out, offset, Address::times_4, 8), sum); 8499 8500 movq(op1, Address(in, len, Address::times_4, 0)); 8501 rorq(op1, 32); 8502 movq(sum, Address(out, offset, Address::times_4, 0)); 8503 rorq(sum, 32); 8504 if (UseBMI2Instructions) { 8505 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 8506 } 8507 else { 8508 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8509 } 8510 // Store back in big endian from little endian 8511 rorq(sum, 0x20); 8512 movq(Address(out, offset, Address::times_4, 0), sum); 8513 8514 jmp(L_first_loop); 8515 bind(L_first_loop_exit); 8516 } 8517 8518 /** 8519 * Code for BigInteger::mulAdd() intrinsic 8520 * 8521 * rdi: out 8522 * rsi: in 8523 * r11: offs (out.length - offset) 8524 * rcx: len 8525 * r8: k 8526 * r12: tmp1 8527 * r13: tmp2 8528 * r14: tmp3 8529 * r15: tmp4 8530 * rbx: tmp5 8531 * Multiply the in[] by word k and add to out[], return the carry in rax 8532 */ 8533 void MacroAssembler::mul_add(Register out, Register in, Register offs, 8534 Register len, Register k, Register tmp1, Register tmp2, Register tmp3, 8535 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 8536 8537 Label L_carry, L_last_in, L_done; 8538 8539 // carry = 0; 8540 // for (int j=len-1; j >= 0; j--) { 8541 // long product = (in[j] & LONG_MASK) * kLong + 8542 // (out[offs] & LONG_MASK) + carry; 8543 // out[offs--] = (int)product; 8544 // carry = product >>> 32; 8545 // } 8546 // 8547 push(tmp1); 8548 push(tmp2); 8549 push(tmp3); 8550 push(tmp4); 8551 push(tmp5); 8552 8553 Register op2 = tmp2; 8554 const Register sum = tmp3; 8555 const Register op1 = tmp4; 8556 const Register carry = tmp5; 8557 8558 if (UseBMI2Instructions) { 8559 op2 = rdxReg; 8560 movl(op2, k); 8561 } 8562 else { 8563 movl(op2, k); 8564 } 8565 8566 xorq(carry, carry); 8567 8568 //First loop 8569 8570 //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply 8571 //The carry is in tmp5 8572 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg); 8573 8574 //Multiply the trailing in[] entry using 64 bit by 32 bit, if any 8575 decrementl(len); 8576 jccb(Assembler::negative, L_carry); 8577 decrementl(len); 8578 jccb(Assembler::negative, L_last_in); 8579 8580 movq(op1, Address(in, len, Address::times_4, 0)); 8581 rorq(op1, 32); 8582 8583 subl(offs, 2); 8584 movq(sum, Address(out, offs, Address::times_4, 0)); 8585 rorq(sum, 32); 8586 8587 if (UseBMI2Instructions) { 8588 
multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 8589 } 8590 else { 8591 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8592 } 8593 8594 // Store back in big endian from little endian 8595 rorq(sum, 0x20); 8596 movq(Address(out, offs, Address::times_4, 0), sum); 8597 8598 testl(len, len); 8599 jccb(Assembler::zero, L_carry); 8600 8601 //Multiply the last in[] entry, if any 8602 bind(L_last_in); 8603 movl(op1, Address(in, 0)); 8604 movl(sum, Address(out, offs, Address::times_4, -4)); 8605 8606 movl(raxReg, k); 8607 mull(op1); //tmp4 * eax -> edx:eax 8608 addl(sum, carry); 8609 adcl(rdxReg, 0); 8610 addl(sum, raxReg); 8611 adcl(rdxReg, 0); 8612 movl(carry, rdxReg); 8613 8614 movl(Address(out, offs, Address::times_4, -4), sum); 8615 8616 bind(L_carry); 8617 //return tmp5/carry as carry in rax 8618 movl(rax, carry); 8619 8620 bind(L_done); 8621 pop(tmp5); 8622 pop(tmp4); 8623 pop(tmp3); 8624 pop(tmp2); 8625 pop(tmp1); 8626 } 8627 #endif 8628 8629 /** 8630 * Emits code to update CRC-32 with a byte value according to constants in table 8631 * 8632 * @param [in,out]crc Register containing the crc. 8633 * @param [in]val Register containing the byte to fold into the CRC. 8634 * @param [in]table Register containing the table of crc constants. 8635 * 8636 * uint32_t crc; 8637 * val = crc_table[(val ^ crc) & 0xFF]; 8638 * crc = val ^ (crc >> 8); 8639 * 8640 */ 8641 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 8642 xorl(val, crc); 8643 andl(val, 0xFF); 8644 shrl(crc, 8); // unsigned shift 8645 xorl(crc, Address(table, val, Address::times_4, 0)); 8646 } 8647 8648 /** 8649 * Fold 128-bit data chunk 8650 */ 8651 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { 8652 if (UseAVX > 0) { 8653 vpclmulhdq(xtmp, xK, xcrc); // [123:64] 8654 vpclmulldq(xcrc, xK, xcrc); // [63:0] 8655 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */); 8656 pxor(xcrc, xtmp); 8657 } else { 8658 movdqa(xtmp, xcrc); 8659 pclmulhdq(xtmp, xK); // [123:64] 8660 pclmulldq(xcrc, xK); // [63:0] 8661 pxor(xcrc, xtmp); 8662 movdqu(xtmp, Address(buf, offset)); 8663 pxor(xcrc, xtmp); 8664 } 8665 } 8666 8667 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) { 8668 if (UseAVX > 0) { 8669 vpclmulhdq(xtmp, xK, xcrc); 8670 vpclmulldq(xcrc, xK, xcrc); 8671 pxor(xcrc, xbuf); 8672 pxor(xcrc, xtmp); 8673 } else { 8674 movdqa(xtmp, xcrc); 8675 pclmulhdq(xtmp, xK); 8676 pclmulldq(xcrc, xK); 8677 pxor(xcrc, xbuf); 8678 pxor(xcrc, xtmp); 8679 } 8680 } 8681 8682 /** 8683 * 8-bit folds to compute 32-bit CRC 8684 * 8685 * uint64_t xcrc; 8686 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8); 8687 */ 8688 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) { 8689 movdl(tmp, xcrc); 8690 andl(tmp, 0xFF); 8691 movdl(xtmp, Address(table, tmp, Address::times_4, 0)); 8692 psrldq(xcrc, 1); // unsigned shift one byte 8693 pxor(xcrc, xtmp); 8694 } 8695 8696 /** 8697 * uint32_t crc; 8698 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 8699 */ 8700 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 8701 movl(tmp, crc); 8702 andl(tmp, 0xFF); 8703 shrl(crc, 8); 8704 xorl(crc, Address(table, tmp, Address::times_4, 0)); 8705 } 8706 8707 /** 8708 * @param crc register containing existing CRC (32-bit) 8709 * @param buf register pointing to input byte buffer (byte*) 8710 * @param len register containing number of bytes 8711 * 
@param table register that will contain address of CRC table 8712 * @param tmp scratch register 8713 */ 8714 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) { 8715 assert_different_registers(crc, buf, len, table, tmp, rax); 8716 8717 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 8718 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 8719 8720 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 8721 // context for the registers used, where all instructions below are using 128-bit mode 8722 // On EVEX without VL and BW, these instructions will all be AVX. 8723 lea(table, ExternalAddress(StubRoutines::crc_table_addr())); 8724 notl(crc); // ~crc 8725 cmpl(len, 16); 8726 jcc(Assembler::less, L_tail); 8727 8728 // Align buffer to 16 bytes 8729 movl(tmp, buf); 8730 andl(tmp, 0xF); 8731 jccb(Assembler::zero, L_aligned); 8732 subl(tmp, 16); 8733 addl(len, tmp); 8734 8735 align(4); 8736 BIND(L_align_loop); 8737 movsbl(rax, Address(buf, 0)); // load byte with sign extension 8738 update_byte_crc32(crc, rax, table); 8739 increment(buf); 8740 incrementl(tmp); 8741 jccb(Assembler::less, L_align_loop); 8742 8743 BIND(L_aligned); 8744 movl(tmp, len); // save 8745 shrl(len, 4); 8746 jcc(Assembler::zero, L_tail_restore); 8747 8748 // Fold crc into first bytes of vector 8749 movdqa(xmm1, Address(buf, 0)); 8750 movdl(rax, xmm1); 8751 xorl(crc, rax); 8752 if (VM_Version::supports_sse4_1()) { 8753 pinsrd(xmm1, crc, 0); 8754 } else { 8755 pinsrw(xmm1, crc, 0); 8756 shrl(crc, 16); 8757 pinsrw(xmm1, crc, 1); 8758 } 8759 addptr(buf, 16); 8760 subl(len, 4); // len > 0 8761 jcc(Assembler::less, L_fold_tail); 8762 8763 movdqa(xmm2, Address(buf, 0)); 8764 movdqa(xmm3, Address(buf, 16)); 8765 movdqa(xmm4, Address(buf, 32)); 8766 addptr(buf, 48); 8767 subl(len, 3); 8768 jcc(Assembler::lessEqual, L_fold_512b); 8769 8770 // Fold total 512 bits of polynomial on each iteration, 8771 // 128 bits per each of 4 parallel streams. 8772 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1); 8773 8774 align32(); 8775 BIND(L_fold_512b_loop); 8776 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 8777 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16); 8778 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32); 8779 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48); 8780 addptr(buf, 64); 8781 subl(len, 4); 8782 jcc(Assembler::greater, L_fold_512b_loop); 8783 8784 // Fold 512 bits to 128 bits. 8785 BIND(L_fold_512b); 8786 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 8787 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2); 8788 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3); 8789 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4); 8790 8791 // Fold the rest of 128 bits data chunks 8792 BIND(L_fold_tail); 8793 addl(len, 3); 8794 jccb(Assembler::lessEqual, L_fold_128b); 8795 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 8796 8797 BIND(L_fold_tail_loop); 8798 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 8799 addptr(buf, 16); 8800 decrementl(len); 8801 jccb(Assembler::greater, L_fold_tail_loop); 8802 8803 // Fold 128 bits in xmm1 down into 32 bits in crc register. 
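  // The remainder is first narrowed with carry-less multiplies and then
  // finished with eight 8-bit table folds. One 8-bit fold step, as
  // documented for fold_8bit_crc32 above (illustrative sketch only):
  //
  //   uint64_t x = ...;                        // current remainder
  //   x = timesXtoThe32[x & 0xFF] ^ (x >> 8);  // applied 8 times -> 32-bit CRC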
  BIND(L_fold_128b);
  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1);
  if (UseAVX > 0) {
    vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
    vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
    vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
  } else {
    movdqa(xmm2, xmm0);
    pclmulqdq(xmm2, xmm1, 0x1);
    movdqa(xmm3, xmm0);
    pand(xmm3, xmm2);
    pclmulqdq(xmm0, xmm3, 0x1);
  }
  psrldq(xmm1, 8);
  psrldq(xmm2, 4);
  pxor(xmm0, xmm1);
  pxor(xmm0, xmm2);

  // 8 8-bit folds to compute the 32-bit CRC.
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(xmm0, table, xmm1, rax);
  }
  movdl(crc, xmm0); // mov 32 bits to general register
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(crc, table, rax);
  }

  BIND(L_tail_restore);
  movl(len, tmp); // restore
  BIND(L_tail);
  andl(len, 0xf);
  jccb(Assembler::zero, L_exit);

  // Fold the remaining bytes
  align(4);
  BIND(L_tail_loop);
  movsbl(rax, Address(buf, 0)); // load byte with sign extension
  update_byte_crc32(crc, rax, table);
  increment(buf);
  decrementl(len);
  jccb(Assembler::greater, L_tail_loop);

  BIND(L_exit);
  notl(crc); // ~crc
}

#ifdef _LP64
// Helper function for AVX 512 CRC32
// Fold 512-bit data chunks
void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf,
                                             Register pos, int offset) {
  evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit);
  evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64]
  evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0]
  evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */);
  evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */);
}

// Helper function for AVX 512 CRC32
// Compute CRC32 for < 256B buffers
void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos,
                                              Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
                                              Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) {

  Label L_less_than_32, L_exact_16_left, L_less_than_16_left;
  Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left;
  Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2;

  // check if there is enough buffer to be able to fold 16B at a time
  cmpl(len, 32);
  jcc(Assembler::less, L_less_than_32);

  // if there is, load the constants
  movdqu(xmm10, Address(table, 1 * 16));                     // rk1 and rk2 in xmm10
  movdl(xmm0, crc);                                          // get the initial crc value
  movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); // load the input data
  pxor(xmm7, xmm0);

  // update the buffer pointer
  addl(pos, 16);
  // update the counter: subtract 32 instead of 16 to save one instruction in the loop
  subl(len, 32);
  jmp(L_16B_reduction_loop);

  bind(L_less_than_32);
  // Move the initial crc into the return value; this is necessary for zero-length buffers.
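  // (rax is the routine's result register; a zero-length buffer takes the
  // jump to L_cleanup with the crc value passed through unchanged.)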
8890 movl(rax, crc); 8891 testl(len, len); 8892 jcc(Assembler::equal, L_cleanup); 8893 8894 movdl(xmm0, crc); //get the initial crc value 8895 8896 cmpl(len, 16); 8897 jcc(Assembler::equal, L_exact_16_left); 8898 jcc(Assembler::less, L_less_than_16_left); 8899 8900 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext 8901 pxor(xmm7, xmm0); //xor the initial crc value 8902 addl(pos, 16); 8903 subl(len, 16); 8904 movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10 8905 jmp(L_get_last_two_xmms); 8906 8907 bind(L_less_than_16_left); 8908 //use stack space to load data less than 16 bytes, zero - out the 16B in memory first. 8909 pxor(xmm1, xmm1); 8910 movptr(tmp1, rsp); 8911 movdqu(Address(tmp1, 0 * 16), xmm1); 8912 8913 cmpl(len, 4); 8914 jcc(Assembler::less, L_only_less_than_4); 8915 8916 //backup the counter value 8917 movl(tmp2, len); 8918 cmpl(len, 8); 8919 jcc(Assembler::less, L_less_than_8_left); 8920 8921 //load 8 Bytes 8922 movq(rax, Address(buf, pos, Address::times_1, 0 * 16)); 8923 movq(Address(tmp1, 0 * 16), rax); 8924 addptr(tmp1, 8); 8925 subl(len, 8); 8926 addl(pos, 8); 8927 8928 bind(L_less_than_8_left); 8929 cmpl(len, 4); 8930 jcc(Assembler::less, L_less_than_4_left); 8931 8932 //load 4 Bytes 8933 movl(rax, Address(buf, pos, Address::times_1, 0)); 8934 movl(Address(tmp1, 0 * 16), rax); 8935 addptr(tmp1, 4); 8936 subl(len, 4); 8937 addl(pos, 4); 8938 8939 bind(L_less_than_4_left); 8940 cmpl(len, 2); 8941 jcc(Assembler::less, L_less_than_2_left); 8942 8943 // load 2 Bytes 8944 movw(rax, Address(buf, pos, Address::times_1, 0)); 8945 movl(Address(tmp1, 0 * 16), rax); 8946 addptr(tmp1, 2); 8947 subl(len, 2); 8948 addl(pos, 2); 8949 8950 bind(L_less_than_2_left); 8951 cmpl(len, 1); 8952 jcc(Assembler::less, L_zero_left); 8953 8954 // load 1 Byte 8955 movb(rax, Address(buf, pos, Address::times_1, 0)); 8956 movb(Address(tmp1, 0 * 16), rax); 8957 8958 bind(L_zero_left); 8959 movdqu(xmm7, Address(rsp, 0)); 8960 pxor(xmm7, xmm0); //xor the initial crc value 8961 8962 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr())); 8963 movdqu(xmm0, Address(rax, tmp2)); 8964 pshufb(xmm7, xmm0); 8965 jmp(L_128_done); 8966 8967 bind(L_exact_16_left); 8968 movdqu(xmm7, Address(buf, pos, Address::times_1, 0)); 8969 pxor(xmm7, xmm0); //xor the initial crc value 8970 jmp(L_128_done); 8971 8972 bind(L_only_less_than_4); 8973 cmpl(len, 3); 8974 jcc(Assembler::less, L_only_less_than_3); 8975 8976 // load 3 Bytes 8977 movb(rax, Address(buf, pos, Address::times_1, 0)); 8978 movb(Address(tmp1, 0), rax); 8979 8980 movb(rax, Address(buf, pos, Address::times_1, 1)); 8981 movb(Address(tmp1, 1), rax); 8982 8983 movb(rax, Address(buf, pos, Address::times_1, 2)); 8984 movb(Address(tmp1, 2), rax); 8985 8986 movdqu(xmm7, Address(rsp, 0)); 8987 pxor(xmm7, xmm0); //xor the initial crc value 8988 8989 pslldq(xmm7, 0x5); 8990 jmp(L_barrett); 8991 bind(L_only_less_than_3); 8992 cmpl(len, 2); 8993 jcc(Assembler::less, L_only_less_than_2); 8994 8995 // load 2 Bytes 8996 movb(rax, Address(buf, pos, Address::times_1, 0)); 8997 movb(Address(tmp1, 0), rax); 8998 8999 movb(rax, Address(buf, pos, Address::times_1, 1)); 9000 movb(Address(tmp1, 1), rax); 9001 9002 movdqu(xmm7, Address(rsp, 0)); 9003 pxor(xmm7, xmm0); //xor the initial crc value 9004 9005 pslldq(xmm7, 0x6); 9006 jmp(L_barrett); 9007 9008 bind(L_only_less_than_2); 9009 //load 1 Byte 9010 movb(rax, Address(buf, pos, Address::times_1, 0)); 9011 movb(Address(tmp1, 0), rax); 9012 9013 movdqu(xmm7, Address(rsp, 
0)); 9014 pxor(xmm7, xmm0); //xor the initial crc value 9015 9016 pslldq(xmm7, 0x7); 9017 } 9018 9019 /** 9020 * Compute CRC32 using AVX512 instructions 9021 * param crc register containing existing CRC (32-bit) 9022 * param buf register pointing to input byte buffer (byte*) 9023 * param len register containing number of bytes 9024 * param table address of crc or crc32c table 9025 * param tmp1 scratch register 9026 * param tmp2 scratch register 9027 * return rax result register 9028 * 9029 * This routine is identical for crc32c with the exception of the precomputed constant 9030 * table which will be passed as the table argument. The calculation steps are 9031 * the same for both variants. 9032 */ 9033 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) { 9034 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12); 9035 9036 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 9037 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 9038 Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop; 9039 Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop; 9040 Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup; 9041 9042 const Register pos = r12; 9043 push(r12); 9044 subptr(rsp, 16 * 2 + 8); 9045 9046 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 9047 // context for the registers used, where all instructions below are using 128-bit mode 9048 // On EVEX without VL and BW, these instructions will all be AVX. 9049 movl(pos, 0); 9050 9051 // check if smaller than 256B 9052 cmpl(len, 256); 9053 jcc(Assembler::less, L_less_than_256); 9054 9055 // load the initial crc value 9056 movdl(xmm10, crc); 9057 9058 // receive the initial 64B data, xor the initial crc value 9059 evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit); 9060 evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit); 9061 evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit); 9062 evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4 9063 9064 subl(len, 256); 9065 cmpl(len, 256); 9066 jcc(Assembler::less, L_fold_128_B_loop); 9067 9068 evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit); 9069 evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit); 9070 evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2 9071 subl(len, 256); 9072 9073 bind(L_fold_256_B_loop); 9074 addl(pos, 256); 9075 fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64); 9076 fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64); 9077 fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64); 9078 fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64); 9079 9080 subl(len, 256); 9081 jcc(Assembler::greaterEqual, L_fold_256_B_loop); 9082 9083 // Fold 256 into 128 9084 addl(pos, 256); 9085 evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit); 9086 evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit); 9087 vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC 9088 9089 evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit); 9090 evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit); 9091 vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC 9092 9093 evmovdquq(xmm0, xmm7, 
Assembler::AVX_512bit);
  evmovdquq(xmm4, xmm8, Assembler::AVX_512bit);

  addl(len, 128);
  jmp(L_fold_128_B_register);

  // At this section of the code there are 128 * x + y (0 <= y < 128) bytes of buffer. The fold_128_B_loop
  // loop will fold 128B at a time until we have 128 + y bytes of buffer.

  // fold 128B at a time. This section of the code folds two 512-bit registers in parallel
  bind(L_fold_128_B_loop);
  addl(pos, 128);
  fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64);
  fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64);

  subl(len, 128);
  jcc(Assembler::greaterEqual, L_fold_128_B_loop);

  addl(pos, 128);

  // at this point, the buffer pointer is pointing at the last y bytes of the buffer, where 0 <= y < 128
  // the 128B of folded data is in two 512-bit registers: xmm0 and xmm4
  bind(L_fold_128_B_register);
  evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16
  evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0
  evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit);
  evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit);
  // save the last 16B, which have no multiplicand
  vextracti64x2(xmm7, xmm4, 3);

  evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit);
  evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit);
  // Needed later in reduction loop
  movdqu(xmm10, Address(table, 1 * 16));
  vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC
  vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC

  // Swap 1,0,3,2 - 01 00 11 10
  evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit);
  evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit);
  vextracti128(xmm5, xmm8, 1);
  evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit);

  // instead of 128, we add 128 - 16 to the loop counter to save one instruction from the loop;
  // instead of a cmp instruction, we use the negative flag with the jl instruction
  addl(len, 128 - 16);
  jcc(Assembler::less, L_final_reduction_for_128);

  bind(L_16B_reduction_loop);
  vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
  vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
  vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
  movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16));
  vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
  addl(pos, 16);
  subl(len, 16);
  jcc(Assembler::greaterEqual, L_16B_reduction_loop);

  bind(L_final_reduction_for_128);
  addl(len, 16);
  jcc(Assembler::equal, L_128_done);

  bind(L_get_last_two_xmms);
  movdqu(xmm2, xmm7);
  addl(pos, len);
  movdqu(xmm1, Address(buf, pos, Address::times_1, -16));
  subl(pos, len);

  // get rid of the extra data that was loaded before
  // load the shift constant
  lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
  movdqu(xmm0, Address(rax, len));
  addl(rax, len);

  vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
  // Change mask to 512
  vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2);
  vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit);

  blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit);
  vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
  vpclmulqdq(xmm7,
xmm7, xmm10, 0x10); 9175 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 9176 vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit); 9177 9178 bind(L_128_done); 9179 // compute crc of a 128-bit value 9180 movdqu(xmm10, Address(table, 3 * 16)); 9181 movdqu(xmm0, xmm7); 9182 9183 // 64b fold 9184 vpclmulqdq(xmm7, xmm7, xmm10, 0x0); 9185 vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit); 9186 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 9187 9188 // 32b fold 9189 movdqu(xmm0, xmm7); 9190 vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit); 9191 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 9192 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 9193 jmp(L_barrett); 9194 9195 bind(L_less_than_256); 9196 kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup); 9197 9198 //barrett reduction 9199 bind(L_barrett); 9200 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2); 9201 movdqu(xmm1, xmm7); 9202 movdqu(xmm2, xmm7); 9203 movdqu(xmm10, Address(table, 4 * 16)); 9204 9205 pclmulqdq(xmm7, xmm10, 0x0); 9206 pxor(xmm7, xmm2); 9207 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2); 9208 movdqu(xmm2, xmm7); 9209 pclmulqdq(xmm7, xmm10, 0x10); 9210 pxor(xmm7, xmm2); 9211 pxor(xmm7, xmm1); 9212 pextrd(crc, xmm7, 2); 9213 9214 bind(L_cleanup); 9215 addptr(rsp, 16 * 2 + 8); 9216 pop(r12); 9217 } 9218 9219 // S. Gueron / Information Processing Letters 112 (2012) 184 9220 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table. 9221 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0]. 9222 // Output: the 64-bit carry-less product of B * CONST 9223 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n, 9224 Register tmp1, Register tmp2, Register tmp3) { 9225 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 9226 if (n > 0) { 9227 addq(tmp3, n * 256 * 8); 9228 } 9229 // Q1 = TABLEExt[n][B & 0xFF]; 9230 movl(tmp1, in); 9231 andl(tmp1, 0x000000FF); 9232 shll(tmp1, 3); 9233 addq(tmp1, tmp3); 9234 movq(tmp1, Address(tmp1, 0)); 9235 9236 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 9237 movl(tmp2, in); 9238 shrl(tmp2, 8); 9239 andl(tmp2, 0x000000FF); 9240 shll(tmp2, 3); 9241 addq(tmp2, tmp3); 9242 movq(tmp2, Address(tmp2, 0)); 9243 9244 shlq(tmp2, 8); 9245 xorq(tmp1, tmp2); 9246 9247 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 9248 movl(tmp2, in); 9249 shrl(tmp2, 16); 9250 andl(tmp2, 0x000000FF); 9251 shll(tmp2, 3); 9252 addq(tmp2, tmp3); 9253 movq(tmp2, Address(tmp2, 0)); 9254 9255 shlq(tmp2, 16); 9256 xorq(tmp1, tmp2); 9257 9258 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 9259 shrl(in, 24); 9260 andl(in, 0x000000FF); 9261 shll(in, 3); 9262 addq(in, tmp3); 9263 movq(in, Address(in, 0)); 9264 9265 shlq(in, 24); 9266 xorq(in, tmp1); 9267 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 9268 } 9269 9270 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 9271 Register in_out, 9272 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 9273 XMMRegister w_xtmp2, 9274 Register tmp1, 9275 Register n_tmp2, Register n_tmp3) { 9276 if (is_pclmulqdq_supported) { 9277 movdl(w_xtmp1, in_out); // modified blindly 9278 9279 movl(tmp1, const_or_pre_comp_const_index); 9280 movdl(w_xtmp2, tmp1); 9281 pclmulqdq(w_xtmp1, w_xtmp2, 0); 9282 9283 movdq(in_out, w_xtmp1); 9284 } else { 9285 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3); 9286 } 9287 } 9288 9289 // 
Recombination Alternative 2: No bit-reflections 9290 // T1 = (CRC_A * U1) << 1 9291 // T2 = (CRC_B * U2) << 1 9292 // C1 = T1 >> 32 9293 // C2 = T2 >> 32 9294 // T1 = T1 & 0xFFFFFFFF 9295 // T2 = T2 & 0xFFFFFFFF 9296 // T1 = CRC32(0, T1) 9297 // T2 = CRC32(0, T2) 9298 // C1 = C1 ^ T1 9299 // C2 = C2 ^ T2 9300 // CRC = C1 ^ C2 ^ CRC_C 9301 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 9302 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9303 Register tmp1, Register tmp2, 9304 Register n_tmp3) { 9305 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 9306 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 9307 shlq(in_out, 1); 9308 movl(tmp1, in_out); 9309 shrq(in_out, 32); 9310 xorl(tmp2, tmp2); 9311 crc32(tmp2, tmp1, 4); 9312 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here 9313 shlq(in1, 1); 9314 movl(tmp1, in1); 9315 shrq(in1, 32); 9316 xorl(tmp2, tmp2); 9317 crc32(tmp2, tmp1, 4); 9318 xorl(in1, tmp2); 9319 xorl(in_out, in1); 9320 xorl(in_out, in2); 9321 } 9322 9323 // Set N to predefined value 9324 // Subtract from a length of a buffer 9325 // execute in a loop: 9326 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0 9327 // for i = 1 to N do 9328 // CRC_A = CRC32(CRC_A, A[i]) 9329 // CRC_B = CRC32(CRC_B, B[i]) 9330 // CRC_C = CRC32(CRC_C, C[i]) 9331 // end for 9332 // Recombine 9333 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 9334 Register in_out1, Register in_out2, Register in_out3, 9335 Register tmp1, Register tmp2, Register tmp3, 9336 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9337 Register tmp4, Register tmp5, 9338 Register n_tmp6) { 9339 Label L_processPartitions; 9340 Label L_processPartition; 9341 Label L_exit; 9342 9343 bind(L_processPartitions); 9344 cmpl(in_out1, 3 * size); 9345 jcc(Assembler::less, L_exit); 9346 xorl(tmp1, tmp1); 9347 xorl(tmp2, tmp2); 9348 movq(tmp3, in_out2); 9349 addq(tmp3, size); 9350 9351 bind(L_processPartition); 9352 crc32(in_out3, Address(in_out2, 0), 8); 9353 crc32(tmp1, Address(in_out2, size), 8); 9354 crc32(tmp2, Address(in_out2, size * 2), 8); 9355 addq(in_out2, 8); 9356 cmpq(in_out2, tmp3); 9357 jcc(Assembler::less, L_processPartition); 9358 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 9359 w_xtmp1, w_xtmp2, w_xtmp3, 9360 tmp4, tmp5, 9361 n_tmp6); 9362 addq(in_out2, 2 * size); 9363 subl(in_out1, 3 * size); 9364 jmp(L_processPartitions); 9365 9366 bind(L_exit); 9367 } 9368 #else 9369 void MacroAssembler::crc32c_ipl_alg4(Register in_out, uint32_t n, 9370 Register tmp1, Register tmp2, Register tmp3, 9371 XMMRegister xtmp1, XMMRegister xtmp2) { 9372 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 9373 if (n > 0) { 9374 addl(tmp3, n * 256 * 8); 9375 } 9376 // Q1 = TABLEExt[n][B & 0xFF]; 9377 movl(tmp1, in_out); 9378 andl(tmp1, 0x000000FF); 9379 shll(tmp1, 3); 9380 addl(tmp1, tmp3); 9381 movq(xtmp1, Address(tmp1, 0)); 9382 9383 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 9384 movl(tmp2, in_out); 9385 shrl(tmp2, 8); 9386 andl(tmp2, 0x000000FF); 9387 shll(tmp2, 3); 9388 addl(tmp2, tmp3); 9389 movq(xtmp2, 
Address(tmp2, 0)); 9390 9391 psllq(xtmp2, 8); 9392 pxor(xtmp1, xtmp2); 9393 9394 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 9395 movl(tmp2, in_out); 9396 shrl(tmp2, 16); 9397 andl(tmp2, 0x000000FF); 9398 shll(tmp2, 3); 9399 addl(tmp2, tmp3); 9400 movq(xtmp2, Address(tmp2, 0)); 9401 9402 psllq(xtmp2, 16); 9403 pxor(xtmp1, xtmp2); 9404 9405 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 9406 shrl(in_out, 24); 9407 andl(in_out, 0x000000FF); 9408 shll(in_out, 3); 9409 addl(in_out, tmp3); 9410 movq(xtmp2, Address(in_out, 0)); 9411 9412 psllq(xtmp2, 24); 9413 pxor(xtmp1, xtmp2); // Result in CXMM 9414 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 9415 } 9416 9417 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 9418 Register in_out, 9419 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 9420 XMMRegister w_xtmp2, 9421 Register tmp1, 9422 Register n_tmp2, Register n_tmp3) { 9423 if (is_pclmulqdq_supported) { 9424 movdl(w_xtmp1, in_out); 9425 9426 movl(tmp1, const_or_pre_comp_const_index); 9427 movdl(w_xtmp2, tmp1); 9428 pclmulqdq(w_xtmp1, w_xtmp2, 0); 9429 // Keep result in XMM since GPR is 32 bit in length 9430 } else { 9431 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3, w_xtmp1, w_xtmp2); 9432 } 9433 } 9434 9435 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 9436 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9437 Register tmp1, Register tmp2, 9438 Register n_tmp3) { 9439 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 9440 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 9441 9442 psllq(w_xtmp1, 1); 9443 movdl(tmp1, w_xtmp1); 9444 psrlq(w_xtmp1, 32); 9445 movdl(in_out, w_xtmp1); 9446 9447 xorl(tmp2, tmp2); 9448 crc32(tmp2, tmp1, 4); 9449 xorl(in_out, tmp2); 9450 9451 psllq(w_xtmp2, 1); 9452 movdl(tmp1, w_xtmp2); 9453 psrlq(w_xtmp2, 32); 9454 movdl(in1, w_xtmp2); 9455 9456 xorl(tmp2, tmp2); 9457 crc32(tmp2, tmp1, 4); 9458 xorl(in1, tmp2); 9459 xorl(in_out, in1); 9460 xorl(in_out, in2); 9461 } 9462 9463 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 9464 Register in_out1, Register in_out2, Register in_out3, 9465 Register tmp1, Register tmp2, Register tmp3, 9466 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9467 Register tmp4, Register tmp5, 9468 Register n_tmp6) { 9469 Label L_processPartitions; 9470 Label L_processPartition; 9471 Label L_exit; 9472 9473 bind(L_processPartitions); 9474 cmpl(in_out1, 3 * size); 9475 jcc(Assembler::less, L_exit); 9476 xorl(tmp1, tmp1); 9477 xorl(tmp2, tmp2); 9478 movl(tmp3, in_out2); 9479 addl(tmp3, size); 9480 9481 bind(L_processPartition); 9482 crc32(in_out3, Address(in_out2, 0), 4); 9483 crc32(tmp1, Address(in_out2, size), 4); 9484 crc32(tmp2, Address(in_out2, size*2), 4); 9485 crc32(in_out3, Address(in_out2, 0+4), 4); 9486 crc32(tmp1, Address(in_out2, size+4), 4); 9487 crc32(tmp2, Address(in_out2, size*2+4), 4); 9488 addl(in_out2, 8); 9489 cmpl(in_out2, tmp3); 9490 jcc(Assembler::less, L_processPartition); 9491 9492 push(tmp3); 9493 push(in_out1); 9494 push(in_out2); 9495 tmp4 = tmp3; 9496 tmp5 = in_out1; 9497 n_tmp6 = in_out2; 9498 9499 
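  // 32-bit mode is short on scratch registers: tmp3/in_out1/in_out2 were
  // pushed above so they can be lent to crc32c_rec_alt2 as tmp4/tmp5/n_tmp6
  // and are restored by the pops below.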
crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 9500 w_xtmp1, w_xtmp2, w_xtmp3, 9501 tmp4, tmp5, 9502 n_tmp6); 9503 9504 pop(in_out2); 9505 pop(in_out1); 9506 pop(tmp3); 9507 9508 addl(in_out2, 2 * size); 9509 subl(in_out1, 3 * size); 9510 jmp(L_processPartitions); 9511 9512 bind(L_exit); 9513 } 9514 #endif //LP64 9515 9516 #ifdef _LP64 9517 // Algorithm 2: Pipelined usage of the CRC32 instruction. 9518 // Input: A buffer I of L bytes. 9519 // Output: the CRC32C value of the buffer. 9520 // Notations: 9521 // Write L = 24N + r, with N = floor (L/24). 9522 // r = L mod 24 (0 <= r < 24). 9523 // Consider I as the concatenation of A|B|C|R, where A, B, C, each, 9524 // N quadwords, and R consists of r bytes. 9525 // A[j] = I [8j+7:8j], j= 0, 1, ..., N-1 9526 // B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1 9527 // C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1 9528 // if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1 9529 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 9530 Register tmp1, Register tmp2, Register tmp3, 9531 Register tmp4, Register tmp5, Register tmp6, 9532 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9533 bool is_pclmulqdq_supported) { 9534 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 9535 Label L_wordByWord; 9536 Label L_byteByByteProlog; 9537 Label L_byteByByte; 9538 Label L_exit; 9539 9540 if (is_pclmulqdq_supported ) { 9541 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 9542 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr+1); 9543 9544 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 9545 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 9546 9547 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 9548 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 9549 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\""); 9550 } else { 9551 const_or_pre_comp_const_index[0] = 1; 9552 const_or_pre_comp_const_index[1] = 0; 9553 9554 const_or_pre_comp_const_index[2] = 3; 9555 const_or_pre_comp_const_index[3] = 2; 9556 9557 const_or_pre_comp_const_index[4] = 5; 9558 const_or_pre_comp_const_index[5] = 4; 9559 } 9560 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 9561 in2, in1, in_out, 9562 tmp1, tmp2, tmp3, 9563 w_xtmp1, w_xtmp2, w_xtmp3, 9564 tmp4, tmp5, 9565 tmp6); 9566 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 9567 in2, in1, in_out, 9568 tmp1, tmp2, tmp3, 9569 w_xtmp1, w_xtmp2, w_xtmp3, 9570 tmp4, tmp5, 9571 tmp6); 9572 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 9573 in2, in1, in_out, 9574 tmp1, tmp2, tmp3, 9575 w_xtmp1, w_xtmp2, w_xtmp3, 9576 tmp4, tmp5, 9577 tmp6); 9578 movl(tmp1, in2); 9579 andl(tmp1, 0x00000007); 9580 negl(tmp1); 9581 addl(tmp1, in2); 9582 addq(tmp1, in1); 9583 9584 cmpq(in1, tmp1); 9585 jccb(Assembler::greaterEqual, L_byteByByteProlog); 9586 align(16); 9587 BIND(L_wordByWord); 9588 crc32(in_out, Address(in1, 0), 8); 9589 addq(in1, 8); 9590 cmpq(in1, tmp1); 9591 
jcc(Assembler::less, L_wordByWord); 9592 9593 BIND(L_byteByByteProlog); 9594 andl(in2, 0x00000007); 9595 movl(tmp2, 1); 9596 9597 cmpl(tmp2, in2); 9598 jccb(Assembler::greater, L_exit); 9599 BIND(L_byteByByte); 9600 crc32(in_out, Address(in1, 0), 1); 9601 incq(in1); 9602 incl(tmp2); 9603 cmpl(tmp2, in2); 9604 jcc(Assembler::lessEqual, L_byteByByte); 9605 9606 BIND(L_exit); 9607 } 9608 #else 9609 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 9610 Register tmp1, Register tmp2, Register tmp3, 9611 Register tmp4, Register tmp5, Register tmp6, 9612 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9613 bool is_pclmulqdq_supported) { 9614 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 9615 Label L_wordByWord; 9616 Label L_byteByByteProlog; 9617 Label L_byteByByte; 9618 Label L_exit; 9619 9620 if (is_pclmulqdq_supported) { 9621 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 9622 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1); 9623 9624 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 9625 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 9626 9627 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 9628 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 9629 } else { 9630 const_or_pre_comp_const_index[0] = 1; 9631 const_or_pre_comp_const_index[1] = 0; 9632 9633 const_or_pre_comp_const_index[2] = 3; 9634 const_or_pre_comp_const_index[3] = 2; 9635 9636 const_or_pre_comp_const_index[4] = 5; 9637 const_or_pre_comp_const_index[5] = 4; 9638 } 9639 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 9640 in2, in1, in_out, 9641 tmp1, tmp2, tmp3, 9642 w_xtmp1, w_xtmp2, w_xtmp3, 9643 tmp4, tmp5, 9644 tmp6); 9645 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 9646 in2, in1, in_out, 9647 tmp1, tmp2, tmp3, 9648 w_xtmp1, w_xtmp2, w_xtmp3, 9649 tmp4, tmp5, 9650 tmp6); 9651 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 9652 in2, in1, in_out, 9653 tmp1, tmp2, tmp3, 9654 w_xtmp1, w_xtmp2, w_xtmp3, 9655 tmp4, tmp5, 9656 tmp6); 9657 movl(tmp1, in2); 9658 andl(tmp1, 0x00000007); 9659 negl(tmp1); 9660 addl(tmp1, in2); 9661 addl(tmp1, in1); 9662 9663 BIND(L_wordByWord); 9664 cmpl(in1, tmp1); 9665 jcc(Assembler::greaterEqual, L_byteByByteProlog); 9666 crc32(in_out, Address(in1,0), 4); 9667 addl(in1, 4); 9668 jmp(L_wordByWord); 9669 9670 BIND(L_byteByByteProlog); 9671 andl(in2, 0x00000007); 9672 movl(tmp2, 1); 9673 9674 BIND(L_byteByByte); 9675 cmpl(tmp2, in2); 9676 jccb(Assembler::greater, L_exit); 9677 movb(tmp1, Address(in1, 0)); 9678 crc32(in_out, tmp1, 1); 9679 incl(in1); 9680 incl(tmp2); 9681 jmp(L_byteByByte); 9682 9683 BIND(L_exit); 9684 } 9685 #endif // LP64 9686 #undef BIND 9687 #undef BLOCK_COMMENT 9688 9689 // Compress char[] array to byte[]. 9690 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 9691 // Return the array length if every element in array can be encoded, 9692 // otherwise, the index of first non-latin1 (> 0xff) character. 
// @IntrinsicCandidate
// public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
//   for (int i = 0; i < len; i++) {
//     char c = src[srcOff];
//     if (c > 0xff) {
//       return i;  // return index of non-latin1 char
//     }
//     dst[dstOff] = (byte)c;
//     srcOff++;
//     dstOff++;
//   }
//   return len;
// }
void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
                                         XMMRegister tmp1Reg, XMMRegister tmp2Reg,
                                         XMMRegister tmp3Reg, XMMRegister tmp4Reg,
                                         Register tmp5, Register result, KRegister mask1, KRegister mask2) {
  Label copy_chars_loop, done, reset_sp, copy_tail;

  // rsi: src
  // rdi: dst
  // rdx: len
  // rcx: tmp5
  // rax: result

  // rsi holds start addr of source char[] to be compressed
  // rdi holds start addr of destination byte[]
  // rdx holds length

  assert(len != result, "");

  // save length for return
  movl(result, len);

  if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
      VM_Version::supports_avx512vlbw() &&
      VM_Version::supports_bmi2()) {

    Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail;

    // alignment
    Label post_alignment;

    // if the length of the string is less than 32, handle it the old-fashioned way
    testl(len, -32);
    jcc(Assembler::zero, below_threshold);

    // First check whether a character is compressible (<= 0xFF).
    // Create mask to test for Unicode chars inside zmm vector
    movl(tmp5, 0x00FF);
    evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit);

    testl(len, -64);
    jccb(Assembler::zero, post_alignment);

    movl(tmp5, dst);
    andl(tmp5, (32 - 1));
    negl(tmp5);
    andl(tmp5, (32 - 1));

    // bail out when there is nothing to be done
    testl(tmp5, 0xFFFFFFFF);
    jccb(Assembler::zero, post_alignment);

    // ~(~0 << len), where len is the # of remaining elements to process
    movl(len, 0xFFFFFFFF);
    shlxl(len, len, tmp5);
    notl(len);
    kmovdl(mask2, len);
    movl(len, result);

    evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
    evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
    ktestd(mask1, mask2);
    jcc(Assembler::carryClear, copy_tail);

    evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);

    addptr(src, tmp5);
    addptr(src, tmp5);
    addptr(dst, tmp5);
    subl(len, tmp5);

    bind(post_alignment);
    // end of alignment

    movl(tmp5, len);
    andl(tmp5, (32 - 1)); // tail count (in chars)
    andl(len, ~(32 - 1)); // vector count (in chars)
    jccb(Assembler::zero, copy_loop_tail);

    lea(src, Address(src, len, Address::times_2));
    lea(dst, Address(dst, len, Address::times_1));
    negptr(len);

    bind(copy_32_loop);
    evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
    evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
    kortestdl(mask1, mask1);
    jccb(Assembler::carryClear, reset_for_copy_tail);

    // All elements in the current chunk are valid candidates for
    // compression. Write the truncated byte elements to memory.
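    // (evpmovwb truncates each word element to its low byte on the store.)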
9796 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit); 9797 addptr(len, 32); 9798 jccb(Assembler::notZero, copy_32_loop); 9799 9800 bind(copy_loop_tail); 9801 // bail out when there is nothing to be done 9802 testl(tmp5, 0xFFFFFFFF); 9803 jcc(Assembler::zero, done); 9804 9805 movl(len, tmp5); 9806 9807 // ~(~0 << len), where len is the # of remaining elements to process 9808 movl(tmp5, 0xFFFFFFFF); 9809 shlxl(tmp5, tmp5, len); 9810 notl(tmp5); 9811 9812 kmovdl(mask2, tmp5); 9813 9814 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 9815 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 9816 ktestd(mask1, mask2); 9817 jcc(Assembler::carryClear, copy_tail); 9818 9819 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 9820 jmp(done); 9821 9822 bind(reset_for_copy_tail); 9823 lea(src, Address(src, tmp5, Address::times_2)); 9824 lea(dst, Address(dst, tmp5, Address::times_1)); 9825 subptr(len, tmp5); 9826 jmp(copy_chars_loop); 9827 9828 bind(below_threshold); 9829 } 9830 9831 if (UseSSE42Intrinsics) { 9832 Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail; 9833 9834 // vectored compression 9835 testl(len, 0xfffffff8); 9836 jcc(Assembler::zero, copy_tail); 9837 9838 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors 9839 movdl(tmp1Reg, tmp5); 9840 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 9841 9842 andl(len, 0xfffffff0); 9843 jccb(Assembler::zero, copy_16); 9844 9845 // compress 16 chars per iter 9846 pxor(tmp4Reg, tmp4Reg); 9847 9848 lea(src, Address(src, len, Address::times_2)); 9849 lea(dst, Address(dst, len, Address::times_1)); 9850 negptr(len); 9851 9852 bind(copy_32_loop); 9853 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters 9854 por(tmp4Reg, tmp2Reg); 9855 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters 9856 por(tmp4Reg, tmp3Reg); 9857 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector 9858 jccb(Assembler::notZero, reset_for_copy_tail); 9859 packuswb(tmp2Reg, tmp3Reg); // only ASCII chars; compress each to 1 byte 9860 movdqu(Address(dst, len, Address::times_1), tmp2Reg); 9861 addptr(len, 16); 9862 jccb(Assembler::notZero, copy_32_loop); 9863 9864 // compress next vector of 8 chars (if any) 9865 bind(copy_16); 9866 // len = 0 9867 testl(result, 0x00000008); // check if there's a block of 8 chars to compress 9868 jccb(Assembler::zero, copy_tail_sse); 9869 9870 pxor(tmp3Reg, tmp3Reg); 9871 9872 movdqu(tmp2Reg, Address(src, 0)); 9873 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector 9874 jccb(Assembler::notZero, reset_for_copy_tail); 9875 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte 9876 movq(Address(dst, 0), tmp2Reg); 9877 addptr(src, 16); 9878 addptr(dst, 8); 9879 jmpb(copy_tail_sse); 9880 9881 bind(reset_for_copy_tail); 9882 movl(tmp5, result); 9883 andl(tmp5, 0x0000000f); 9884 lea(src, Address(src, tmp5, Address::times_2)); 9885 lea(dst, Address(dst, tmp5, Address::times_1)); 9886 subptr(len, tmp5); 9887 jmpb(copy_chars_loop); 9888 9889 bind(copy_tail_sse); 9890 movl(len, result); 9891 andl(len, 0x00000007); // tail count (in chars) 9892 } 9893 // compress 1 char per iter 9894 bind(copy_tail); 9895 testl(len, len); 9896 jccb(Assembler::zero, done); 9897 lea(src, Address(src, len, Address::times_2)); 9898 lea(dst, Address(dst, len, Address::times_1)); 9899 negptr(len); 9900 9901 
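  // The scalar tail below iterates with a negative index that counts up to
  // zero: src/dst were advanced past the end above, so element i lives at
  // (end + i). A scalar reference -- an illustrative sketch only, names
  // chosen for exposition:
  //
  //   for (ptrdiff_t i = -n; i != 0; i++) { // n = remaining char count
  //     jchar c = src_end[i];
  //     if (c > 0xff) break;                // -> reset_sp fixes up result
  //     dst_end[i] = (jbyte)c;
  //   }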
bind(copy_chars_loop); 9902 load_unsigned_short(tmp5, Address(src, len, Address::times_2)); 9903 testl(tmp5, 0xff00); // check if Unicode char 9904 jccb(Assembler::notZero, reset_sp); 9905 movb(Address(dst, len, Address::times_1), tmp5); // ASCII char; compress to 1 byte 9906 increment(len); 9907 jccb(Assembler::notZero, copy_chars_loop); 9908 9909 // add len then return (len will be zero if compress succeeded, otherwise negative) 9910 bind(reset_sp); 9911 addl(result, len); 9912 9913 bind(done); 9914 } 9915 9916 // Inflate byte[] array to char[]. 9917 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java 9918 // @IntrinsicCandidate 9919 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) { 9920 // for (int i = 0; i < len; i++) { 9921 // dst[dstOff++] = (char)(src[srcOff++] & 0xff); 9922 // } 9923 // } 9924 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 9925 XMMRegister tmp1, Register tmp2, KRegister mask) { 9926 Label copy_chars_loop, done, below_threshold, avx3_threshold; 9927 // rsi: src 9928 // rdi: dst 9929 // rdx: len 9930 // rcx: tmp2 9931 9932 // rsi holds start addr of source byte[] to be inflated 9933 // rdi holds start addr of destination char[] 9934 // rdx holds length 9935 assert_different_registers(src, dst, len, tmp2); 9936 movl(tmp2, len); 9937 if ((UseAVX > 2) && // AVX512 9938 VM_Version::supports_avx512vlbw() && 9939 VM_Version::supports_bmi2()) { 9940 9941 Label copy_32_loop, copy_tail; 9942 Register tmp3_aliased = len; 9943 9944 // if length of the string is less than 16, handle it in an old fashioned way 9945 testl(len, -16); 9946 jcc(Assembler::zero, below_threshold); 9947 9948 testl(len, -1 * AVX3Threshold); 9949 jcc(Assembler::zero, avx3_threshold); 9950 9951 // In order to use only one arithmetic operation for the main loop we use 9952 // this pre-calculation 9953 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop 9954 andl(len, -32); // vector count 9955 jccb(Assembler::zero, copy_tail); 9956 9957 lea(src, Address(src, len, Address::times_1)); 9958 lea(dst, Address(dst, len, Address::times_2)); 9959 negptr(len); 9960 9961 9962 // inflate 32 chars per iter 9963 bind(copy_32_loop); 9964 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit); 9965 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit); 9966 addptr(len, 32); 9967 jcc(Assembler::notZero, copy_32_loop); 9968 9969 bind(copy_tail); 9970 // bail out when there is nothing to be done 9971 testl(tmp2, -1); // we don't destroy the contents of tmp2 here 9972 jcc(Assembler::zero, done); 9973 9974 // ~(~0 << length), where length is the # of remaining elements to process 9975 movl(tmp3_aliased, -1); 9976 shlxl(tmp3_aliased, tmp3_aliased, tmp2); 9977 notl(tmp3_aliased); 9978 kmovdl(mask, tmp3_aliased); 9979 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit); 9980 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit); 9981 9982 jmp(done); 9983 bind(avx3_threshold); 9984 } 9985 if (UseSSE42Intrinsics) { 9986 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail; 9987 9988 if (UseAVX > 1) { 9989 andl(tmp2, (16 - 1)); 9990 andl(len, -16); 9991 jccb(Assembler::zero, copy_new_tail); 9992 } else { 9993 andl(tmp2, 0x00000007); // tail count (in chars) 9994 andl(len, 0xfffffff8); // vector count (in chars) 9995 jccb(Assembler::zero, copy_tail); 9996 } 9997 9998 // vectored inflation 9999 lea(src, Address(src, len, 
Address::times_1)); 10000 lea(dst, Address(dst, len, Address::times_2)); 10001 negptr(len); 10002 10003 if (UseAVX > 1) { 10004 bind(copy_16_loop); 10005 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit); 10006 vmovdqu(Address(dst, len, Address::times_2), tmp1); 10007 addptr(len, 16); 10008 jcc(Assembler::notZero, copy_16_loop); 10009 10010 bind(below_threshold); 10011 bind(copy_new_tail); 10012 movl(len, tmp2); 10013 andl(tmp2, 0x00000007); 10014 andl(len, 0xFFFFFFF8); 10015 jccb(Assembler::zero, copy_tail); 10016 10017 pmovzxbw(tmp1, Address(src, 0)); 10018 movdqu(Address(dst, 0), tmp1); 10019 addptr(src, 8); 10020 addptr(dst, 2 * 8); 10021 10022 jmp(copy_tail, true); 10023 } 10024 10025 // inflate 8 chars per iter 10026 bind(copy_8_loop); 10027 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words 10028 movdqu(Address(dst, len, Address::times_2), tmp1); 10029 addptr(len, 8); 10030 jcc(Assembler::notZero, copy_8_loop); 10031 10032 bind(copy_tail); 10033 movl(len, tmp2); 10034 10035 cmpl(len, 4); 10036 jccb(Assembler::less, copy_bytes); 10037 10038 movdl(tmp1, Address(src, 0)); // load 4 byte chars 10039 pmovzxbw(tmp1, tmp1); 10040 movq(Address(dst, 0), tmp1); 10041 subptr(len, 4); 10042 addptr(src, 4); 10043 addptr(dst, 8); 10044 10045 bind(copy_bytes); 10046 } else { 10047 bind(below_threshold); 10048 } 10049 10050 testl(len, len); 10051 jccb(Assembler::zero, done); 10052 lea(src, Address(src, len, Address::times_1)); 10053 lea(dst, Address(dst, len, Address::times_2)); 10054 negptr(len); 10055 10056 // inflate 1 char per iter 10057 bind(copy_chars_loop); 10058 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char 10059 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word 10060 increment(len); 10061 jcc(Assembler::notZero, copy_chars_loop); 10062 10063 bind(done); 10064 } 10065 10066 10067 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) { 10068 switch(type) { 10069 case T_BYTE: 10070 case T_BOOLEAN: 10071 evmovdqub(dst, kmask, src, merge, vector_len); 10072 break; 10073 case T_CHAR: 10074 case T_SHORT: 10075 evmovdquw(dst, kmask, src, merge, vector_len); 10076 break; 10077 case T_INT: 10078 case T_FLOAT: 10079 evmovdqul(dst, kmask, src, merge, vector_len); 10080 break; 10081 case T_LONG: 10082 case T_DOUBLE: 10083 evmovdquq(dst, kmask, src, merge, vector_len); 10084 break; 10085 default: 10086 fatal("Unexpected type argument %s", type2name(type)); 10087 break; 10088 } 10089 } 10090 10091 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) { 10092 switch(type) { 10093 case T_BYTE: 10094 case T_BOOLEAN: 10095 evmovdqub(dst, kmask, src, merge, vector_len); 10096 break; 10097 case T_CHAR: 10098 case T_SHORT: 10099 evmovdquw(dst, kmask, src, merge, vector_len); 10100 break; 10101 case T_INT: 10102 case T_FLOAT: 10103 evmovdqul(dst, kmask, src, merge, vector_len); 10104 break; 10105 case T_LONG: 10106 case T_DOUBLE: 10107 evmovdquq(dst, kmask, src, merge, vector_len); 10108 break; 10109 default: 10110 fatal("Unexpected type argument %s", type2name(type)); 10111 break; 10112 } 10113 } 10114 10115 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) { 10116 switch(masklen) { 10117 case 2: 10118 knotbl(dst, src); 10119 movl(rtmp, 3); 10120 kmovbl(ktmp, rtmp); 10121 kandbl(dst, ktmp, dst); 10122 break; 10123 case 
void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) {
  switch(masklen) {
    case 2:
      knotbl(dst, src);
      movl(rtmp, 3);
      kmovbl(ktmp, rtmp);
      kandbl(dst, ktmp, dst);
      break;
    case 4:
      knotbl(dst, src);
      movl(rtmp, 15);
      kmovbl(ktmp, rtmp);
      kandbl(dst, ktmp, dst);
      break;
    case 8:
      knotbl(dst, src);
      break;
    case 16:
      knotwl(dst, src);
      break;
    case 32:
      knotdl(dst, src);
      break;
    case 64:
      knotql(dst, src);
      break;
    default:
      fatal("Unexpected vector length %d", masklen);
      break;
  }
}

void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
  switch(type) {
    case T_BOOLEAN:
    case T_BYTE:
      kandbl(dst, src1, src2);
      break;
    case T_CHAR:
    case T_SHORT:
      kandwl(dst, src1, src2);
      break;
    case T_INT:
    case T_FLOAT:
      kanddl(dst, src1, src2);
      break;
    case T_LONG:
    case T_DOUBLE:
      kandql(dst, src1, src2);
      break;
    default:
      fatal("Unexpected type argument %s", type2name(type));
      break;
  }
}

void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
  switch(type) {
    case T_BOOLEAN:
    case T_BYTE:
      korbl(dst, src1, src2);
      break;
    case T_CHAR:
    case T_SHORT:
      korwl(dst, src1, src2);
      break;
    case T_INT:
    case T_FLOAT:
      kordl(dst, src1, src2);
      break;
    case T_LONG:
    case T_DOUBLE:
      korql(dst, src1, src2);
      break;
    default:
      fatal("Unexpected type argument %s", type2name(type));
      break;
  }
}

void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) {
  switch(type) {
    case T_BOOLEAN:
    case T_BYTE:
      kxorbl(dst, src1, src2);
      break;
    case T_CHAR:
    case T_SHORT:
      kxorwl(dst, src1, src2);
      break;
    case T_INT:
    case T_FLOAT:
      kxordl(dst, src1, src2);
      break;
    case T_LONG:
    case T_DOUBLE:
      kxorql(dst, src1, src2);
      break;
    default:
      fatal("Unexpected type argument %s", type2name(type));
      break;
  }
}

void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_BOOLEAN:
    case T_BYTE:
      evpermb(dst, mask, nds, src, merge, vector_len); break;
    case T_CHAR:
    case T_SHORT:
      evpermw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
    case T_FLOAT:
      evpermd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
    case T_DOUBLE:
      evpermq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_BOOLEAN:
    case T_BYTE:
      evpermb(dst, mask, nds, src, merge, vector_len); break;
    case T_CHAR:
    case T_SHORT:
      evpermw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
    case T_FLOAT:
      evpermd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
    case T_DOUBLE:
      evpermq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}
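// Editorial note (illustrative, not part of the original source): in the
// opmask helpers above (knot/kand/kor/kxor) the BasicType argument only
// selects the opmask width that matches the lane count, e.g.:
//
//   kand(T_INT,  k3, k1, k2);   // expands to kanddl(k3, k1, k2)
//   kxor(T_LONG, k3, k1, k2);   // expands to kxorql(k3, k1, k2)
//
// For masklen 2 and 4, knot additionally clears the bits above the logical
// mask length, since the hardware knot flips all 8 bits of the register.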
                                XMMRegister nds, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
      evpminsb(dst, mask, nds, src, merge, vector_len); break;
    case T_SHORT:
      evpminsw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
      evpminsd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpminsq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
      evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
    case T_SHORT:
      evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
      evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
      evpminsb(dst, mask, nds, src, merge, vector_len); break;
    case T_SHORT:
      evpminsw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
      evpminsd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpminsq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_BYTE:
      evpmaxsb(dst, mask, nds, src, merge, vector_len); break;
    case T_SHORT:
      evpmaxsw(dst, mask, nds, src, merge, vector_len); break;
    case T_INT:
      evpmaxsd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_INT:
      evpxord(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpxorq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_INT:
      evpxord(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpxorq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}
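// Editorial note: throughout the masked EVEX helpers in this file,
// merge == true requests merge-masking (lanes whose mask bit is clear keep
// the old destination value), while merge == false requests zero-masking
// (those lanes are zeroed). An illustrative call:
//
//   evpmins(T_INT, xmm0, k1, xmm1, xmm2, /*merge*/ false,
//           Assembler::AVX_512bit);   // unselected lanes become zero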
void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_INT:
      Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evporq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_INT:
      Assembler::evpord(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evporq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
  switch(type) {
    case T_INT:
      evpandd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpandq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) {
  switch(type) {
    case T_INT:
      evpandd(dst, mask, nds, src, merge, vector_len); break;
    case T_LONG:
      evpandq(dst, mask, nds, src, merge, vector_len); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) {
  switch(masklen) {
    case 8:
      kortestbl(src1, src2);
      break;
    case 16:
      kortestwl(src1, src2);
      break;
    case 32:
      kortestdl(src1, src2);
      break;
    case 64:
      kortestql(src1, src2);
      break;
    default:
      fatal("Unexpected mask length %d", masklen);
      break;
  }
}


void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) {
  switch(masklen) {
    case 8:
      ktestbl(src1, src2);
      break;
    case 16:
      ktestwl(src1, src2);
      break;
    case 32:
      ktestdl(src1, src2);
      break;
    case 64:
      ktestql(src1, src2);
      break;
    default:
      fatal("Unexpected mask length %d", masklen);
      break;
  }
}

void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
  switch(type) {
    case T_INT:
      evprold(dst, mask, src, shift, merge, vlen_enc); break;
    case T_LONG:
      evprolq(dst, mask, src, shift, merge, vlen_enc); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) {
  switch(type) {
    case T_INT:
      evprord(dst, mask, src, shift, merge, vlen_enc); break;
    case T_LONG:
      evprorq(dst, mask, src, shift, merge, vlen_enc); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
  switch(type) {
    case T_INT:
      evprolvd(dst, mask, src1, src2, merge, vlen_enc); break;
    case T_LONG:
      evprolvq(dst, mask, src1, src2, merge, vlen_enc); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}
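// Editorial note: evrord below mirrors evrold. Since a rotate right by
// s bits equals a rotate left by (lane_width - s) bits, either helper
// could be phrased in terms of the other, e.g. for 32-bit lanes:
//
//   evrold(T_INT, dst, mask, src, 32 - s, merge, vlen_enc);
//   // same lanes as evrord(T_INT, dst, mask, src, s, merge, vlen_enc)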
void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) {
  switch(type) {
    case T_INT:
      evprorvd(dst, mask, src1, src2, merge, vlen_enc); break;
    case T_LONG:
      evprorvq(dst, mask, src1, src2, merge, vlen_enc); break;
    default:
      fatal("Unexpected type argument %s", type2name(type)); break;
  }
}

void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    evpandq(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    evpandq(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
  }
}

void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    evporq(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    evporq(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vpshufb(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    vpshufb(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpor(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src3), "missing");

  if (reachable(src3)) {
    vpternlogq(dst, imm8, src2, as_Address(src3), vector_len);
  } else {
    lea(rscratch, src3);
    vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len);
  }
}

#if COMPILER2_OR_JVMCI

void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
                                 Register length, Register temp, int vec_enc) {
  // Computing mask for predicated vector store.
  movptr(temp, -1);
  bzhiq(temp, temp, length);
  kmov(mask, temp);
  evmovdqu(bt, mask, dst, xmm, true, vec_enc);
}
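// Editorial note: fill_masked above builds its store mask arithmetically
// instead of via a table: temp is set to all ones and bzhiq clears every
// bit at position >= length, leaving exactly the low `length` bits set.
// In scalar terms (for length < 64):
//
//   mask = bzhi(-1, length);   // == (1ULL << length) - 1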
// Set memory operation for lengths less than 64 bytes.
void MacroAssembler::fill64_masked(uint shift, Register dst, int disp,
                                   XMMRegister xmm, KRegister mask, Register length,
                                   Register temp, bool use64byteVector) {
  assert(MaxVectorSize >= 32, "vector length should be >= 32");
  const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG };
  if (!use64byteVector) {
    fill32(dst, disp, xmm);
    subptr(length, 32 >> shift);
    fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp);
  } else {
    assert(MaxVectorSize == 64, "vector length != 64");
    fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit);
  }
}


void MacroAssembler::fill32_masked(uint shift, Register dst, int disp,
                                   XMMRegister xmm, KRegister mask, Register length,
                                   Register temp) {
  assert(MaxVectorSize >= 32, "vector length should be >= 32");
  const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG };
  fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit);
}


void MacroAssembler::fill32(Address dst, XMMRegister xmm) {
  assert(MaxVectorSize >= 32, "vector length should be >= 32");
  vmovdqu(dst, xmm);
}

void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) {
  fill32(Address(dst, disp), xmm);
}

void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) {
  assert(MaxVectorSize >= 32, "vector length should be >= 32");
  if (!use64byteVector) {
    fill32(dst, xmm);
    fill32(dst.plus_disp(32), xmm);
  } else {
    evmovdquq(dst, xmm, Assembler::AVX_512bit);
  }
}

void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) {
  fill64(Address(dst, disp), xmm, use64byteVector);
}

#ifdef _LP64
void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value,
                                        Register count, Register rtmp, XMMRegister xtmp) {
  Label L_exit;
  Label L_fill_start;
  Label L_fill_64_bytes;
  Label L_fill_96_bytes;
  Label L_fill_128_bytes;
  Label L_fill_128_bytes_loop;
  Label L_fill_128_loop_header;
  Label L_fill_128_bytes_loop_header;
  Label L_fill_128_bytes_loop_pre_header;
  Label L_fill_zmm_sequence;

  int shift = -1;
  int avx3threshold = VM_Version::avx3_threshold();
  switch(type) {
    case T_BYTE:  shift = 0;
      break;
    case T_SHORT: shift = 1;
      break;
    case T_INT:   shift = 2;
      break;
    /* Uncomment when LONG fill stubs are supported.
    case T_LONG:  shift = 3;
      break;
    */
    default:
      fatal("Unhandled type: %s\n", type2name(type));
  }
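  // Editorial note: the sequences below dispatch on the remaining byte
  // count (count << shift). Runs of up to 32/64/96/128 bytes are handled
  // with at most two full stores plus one masked store; only longer runs
  // reach the aligned 128-byte (or, with ZMM, 192-byte) main loops.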
  if ((avx3threshold != 0) || (MaxVectorSize == 32)) {

    if (MaxVectorSize == 64) {
      cmpq(count, avx3threshold >> shift);
      jcc(Assembler::greater, L_fill_zmm_sequence);
    }

    evpbroadcast(type, xtmp, value, Assembler::AVX_256bit);

    bind(L_fill_start);

    cmpq(count, 32 >> shift);
    jccb(Assembler::greater, L_fill_64_bytes);
    fill32_masked(shift, to, 0, xtmp, k2, count, rtmp);
    jmp(L_exit);

    bind(L_fill_64_bytes);
    cmpq(count, 64 >> shift);
    jccb(Assembler::greater, L_fill_96_bytes);
    fill64_masked(shift, to, 0, xtmp, k2, count, rtmp);
    jmp(L_exit);

    bind(L_fill_96_bytes);
    cmpq(count, 96 >> shift);
    jccb(Assembler::greater, L_fill_128_bytes);
    fill64(to, 0, xtmp);
    subq(count, 64 >> shift);
    fill32_masked(shift, to, 64, xtmp, k2, count, rtmp);
    jmp(L_exit);

    bind(L_fill_128_bytes);
    cmpq(count, 128 >> shift);
    jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header);
    fill64(to, 0, xtmp);
    fill32(to, 64, xtmp);
    subq(count, 96 >> shift);
    fill32_masked(shift, to, 96, xtmp, k2, count, rtmp);
    jmp(L_exit);

    bind(L_fill_128_bytes_loop_pre_header);
    {
      mov(rtmp, to);
      andq(rtmp, 31);
      jccb(Assembler::zero, L_fill_128_bytes_loop_header);
      negq(rtmp);
      addq(rtmp, 32);
      mov64(r8, -1L);
      bzhiq(r8, r8, rtmp);
      kmovql(k2, r8);
      evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit);
      addq(to, rtmp);
      shrq(rtmp, shift);
      subq(count, rtmp);
    }

    cmpq(count, 128 >> shift);
    jcc(Assembler::less, L_fill_start);

    bind(L_fill_128_bytes_loop_header);
    subq(count, 128 >> shift);

    align32();
    bind(L_fill_128_bytes_loop);
    fill64(to, 0, xtmp);
    fill64(to, 64, xtmp);
    addq(to, 128);
    subq(count, 128 >> shift);
    jccb(Assembler::greaterEqual, L_fill_128_bytes_loop);

    addq(count, 128 >> shift);
    jcc(Assembler::zero, L_exit);
    jmp(L_fill_start);
  }
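  // Editorial note: in the loop pre-headers above and below, the
  // destination is first brought up to 32- or 64-byte alignment with a
  // single masked store: rtmp is set to the distance to the next
  // boundary, bzhiq builds an all-ones mask of that many bits, and the
  // masked evmovdqu writes just the unaligned head before the main loop.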
  if (MaxVectorSize == 64) {
    // Sequence using 64 byte ZMM register.
    Label L_fill_128_bytes_zmm;
    Label L_fill_192_bytes_zmm;
    Label L_fill_192_bytes_loop_zmm;
    Label L_fill_192_bytes_loop_header_zmm;
    Label L_fill_192_bytes_loop_pre_header_zmm;
    Label L_fill_start_zmm_sequence;

    bind(L_fill_zmm_sequence);
    evpbroadcast(type, xtmp, value, Assembler::AVX_512bit);

    bind(L_fill_start_zmm_sequence);
    cmpq(count, 64 >> shift);
    jccb(Assembler::greater, L_fill_128_bytes_zmm);
    fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true);
    jmp(L_exit);

    bind(L_fill_128_bytes_zmm);
    cmpq(count, 128 >> shift);
    jccb(Assembler::greater, L_fill_192_bytes_zmm);
    fill64(to, 0, xtmp, true);
    subq(count, 64 >> shift);
    fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true);
    jmp(L_exit);

    bind(L_fill_192_bytes_zmm);
    cmpq(count, 192 >> shift);
    jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm);
    fill64(to, 0, xtmp, true);
    fill64(to, 64, xtmp, true);
    subq(count, 128 >> shift);
    fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true);
    jmp(L_exit);

    bind(L_fill_192_bytes_loop_pre_header_zmm);
    {
      movq(rtmp, to);
      andq(rtmp, 63);
      jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm);
      negq(rtmp);
      addq(rtmp, 64);
      mov64(r8, -1L);
      bzhiq(r8, r8, rtmp);
      kmovql(k2, r8);
      evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit);
      addq(to, rtmp);
      shrq(rtmp, shift);
      subq(count, rtmp);
    }

    cmpq(count, 192 >> shift);
    jcc(Assembler::less, L_fill_start_zmm_sequence);

    bind(L_fill_192_bytes_loop_header_zmm);
    subq(count, 192 >> shift);

    align32();
    bind(L_fill_192_bytes_loop_zmm);
    fill64(to, 0, xtmp, true);
    fill64(to, 64, xtmp, true);
    fill64(to, 128, xtmp, true);
    addq(to, 192);
    subq(count, 192 >> shift);
    jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm);

    addq(count, 192 >> shift);
    jcc(Assembler::zero, L_exit);
    jmp(L_fill_start_zmm_sequence);
  }
  bind(L_exit);
}
#endif
#endif //COMPILER2_OR_JVMCI


#ifdef _LP64
void MacroAssembler::convert_f2i(Register dst, XMMRegister src) {
  Label done;
  cvttss2sil(dst, src);
  // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
  cmpl(dst, 0x80000000); // float_sign_flip
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movflt(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup())));
  pop(dst);
  bind(done);
}

void MacroAssembler::convert_d2i(Register dst, XMMRegister src) {
  Label done;
  cvttsd2sil(dst, src);
  // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
  cmpl(dst, 0x80000000); // float_sign_flip
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movdbl(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup())));
  pop(dst);
  bind(done);
}
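// Editorial note: on overflow, underflow or NaN, cvttss2si/cvttsd2si
// return the "integer indefinite" value (0x80000000, or
// 0x8000000000000000 for the 64-bit forms). The converters in this file
// compare against that sentinel and only then call a fixup stub that
// applies the JLS rules. A scalar sketch of what the f2i stub must
// compute (illustrative, not the stub's actual code):
//
//   jint java_f2i(jfloat f) {
//     if (f != f)                return 0;          // NaN maps to zero
//     if (f >= (jfloat)max_jint) return max_jint;   // saturate upward
//     if (f <= (jfloat)min_jint) return min_jint;   // saturate downward
//     return (jint)f;
//   }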
void MacroAssembler::convert_f2l(Register dst, XMMRegister src) {
  Label done;
  cvttss2siq(dst, src);
  cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movflt(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup())));
  pop(dst);
  bind(done);
}

void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the rounding algorithm.
  // Please refer to the java.lang.Math.round(float) algorithm for details.
  const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000;
  const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24;
  const int32_t FloatConsts_EXP_BIAS = 127;
  const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF;
  const int32_t MINUS_32 = 0xFFFFFFE0;
  Label L_special_case, L_block1, L_exit;
  movl(rtmp, FloatConsts_EXP_BIT_MASK);
  movdl(dst, src);
  andl(dst, rtmp);
  sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1);
  movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS);
  subl(rtmp, dst);
  movl(rcx, rtmp);
  movl(dst, MINUS_32);
  testl(rtmp, dst);
  jccb(Assembler::notEqual, L_special_case);
  movdl(dst, src);
  andl(dst, FloatConsts_SIGNIF_BIT_MASK);
  orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1);
  movdl(rtmp, src);
  testl(rtmp, rtmp);
  jccb(Assembler::greaterEqual, L_block1);
  negl(dst);
  bind(L_block1);
  sarl(dst);
  addl(dst, 0x1);
  sarl(dst, 0x1);
  jmp(L_exit);
  bind(L_special_case);
  convert_f2i(dst, src);
  bind(L_exit);
}
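// Editorial note: the algorithm above follows java.lang.Math.round(float).
// The biased exponent determines the shift that leaves the sign-applied
// significand with one fractional bit; the classic add-half-and-shift then
// rounds:
//
//   dst = significand >> shift;    // sarl(dst) shifts by cl, 1 fraction bit left
//   dst = (dst + 1) >> 1;          // round half up
//
// Inputs whose shift falls outside [0, 31] (the MINUS_32 test) are already
// integral, too large, or NaN, and are handled via convert_f2i instead.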
void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the rounding algorithm.
  // Please refer to the java.lang.Math.round(double) algorithm for details.
  const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L;
  const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53;
  const int64_t DoubleConsts_EXP_BIAS = 1023;
  const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL;
  const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L;
  Label L_special_case, L_block1, L_exit;
  mov64(rtmp, DoubleConsts_EXP_BIT_MASK);
  movq(dst, src);
  andq(dst, rtmp);
  sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1);
  mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS);
  subq(rtmp, dst);
  movq(rcx, rtmp);
  mov64(dst, MINUS_64);
  testq(rtmp, dst);
  jccb(Assembler::notEqual, L_special_case);
  movq(dst, src);
  mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK);
  andq(dst, rtmp);
  mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1);
  orq(dst, rtmp);
  movq(rtmp, src);
  testq(rtmp, rtmp);
  jccb(Assembler::greaterEqual, L_block1);
  negq(dst);
  bind(L_block1);
  sarq(dst);
  addq(dst, 0x1);
  sarq(dst, 0x1);
  jmp(L_exit);
  bind(L_special_case);
  convert_d2l(dst, src);
  bind(L_exit);
}

void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
  Label done;
  cvttsd2siq(dst, src);
  cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movdbl(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
  pop(dst);
  bind(done);
}

void MacroAssembler::cache_wb(Address line)
{
  // 64 bit cpus always support clflush
  assert(VM_Version::supports_clflush(), "clflush should be available");
  bool optimized = VM_Version::supports_clflushopt();
  bool no_evict = VM_Version::supports_clwb();

  // prefer clwb (writeback without evict), otherwise
  // prefer clflushopt (potentially parallel writeback with evict),
  // otherwise fall back on clflush (serial writeback with evict)

  if (optimized) {
    if (no_evict) {
      clwb(line);
    } else {
      clflushopt(line);
    }
  } else {
    // no need for fence when using CLFLUSH
    clflush(line);
  }
}

void MacroAssembler::cache_wbsync(bool is_pre)
{
  assert(VM_Version::supports_clflush(), "clflush should be available");
  bool optimized = VM_Version::supports_clflushopt();
  bool no_evict = VM_Version::supports_clwb();

  // pick the correct implementation

  if (!is_pre && (optimized || no_evict)) {
    // need an sfence for post flush when using clflushopt or clwb;
    // otherwise no need for any synchronization

    sfence();
  }
}

#endif // _LP64
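// Editorial note (illustrative, not part of the original source): callers
// are expected to bracket a run of cache_wb calls with cache_wbsync, in
// roughly this shape:
//
//   cache_wbsync(true);              // pre-sync: currently a no-op
//   for (each cache line in range)
//     cache_wb(Address(line));
//   cache_wbsync(false);             // post-sync: sfence unless plain
//                                    // clflush was used (it is ordered)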
Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::zero:         return Assembler::notZero;
    case Assembler::notZero:      return Assembler::zero;
    case Assembler::less:         return Assembler::greaterEqual;
    case Assembler::lessEqual:    return Assembler::greater;
    case Assembler::greater:      return Assembler::lessEqual;
    case Assembler::greaterEqual: return Assembler::less;
    case Assembler::below:        return Assembler::aboveEqual;
    case Assembler::belowEqual:   return Assembler::above;
    case Assembler::above:        return Assembler::belowEqual;
    case Assembler::aboveEqual:   return Assembler::below;
    case Assembler::overflow:     return Assembler::noOverflow;
    case Assembler::noOverflow:   return Assembler::overflow;
    case Assembler::negative:     return Assembler::positive;
    case Assembler::positive:     return Assembler::negative;
    case Assembler::parity:       return Assembler::noParity;
    case Assembler::noParity:     return Assembler::parity;
  }
  ShouldNotReachHere(); return Assembler::overflow;
}

SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, const bool* flag_addr, bool value, Register rscratch) {
  _masm = masm;
  _masm->cmp8(ExternalAddress((address)flag_addr), value, rscratch);
  _masm->jcc(Assembler::equal, _label);
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}

// 32-bit Windows has its own fast-path implementation
// of get_thread
#if !defined(WIN32) || defined(_LP64)

// This is simply a call to Thread::current()
void MacroAssembler::get_thread(Register thread) {
  if (thread != rax) {
    push(rax);
  }
  LP64_ONLY(push(rdi);)
  LP64_ONLY(push(rsi);)
  push(rdx);
  push(rcx);
#ifdef _LP64
  push(r8);
  push(r9);
  push(r10);
  push(r11);
#endif

  MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);

#ifdef _LP64
  pop(r11);
  pop(r10);
  pop(r9);
  pop(r8);
#endif
  pop(rcx);
  pop(rdx);
  LP64_ONLY(pop(rsi);)
  LP64_ONLY(pop(rdi);)
  if (thread != rax) {
    mov(thread, rax);
    pop(rax);
  }
}


#endif // !WIN32 || _LP64

void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
  Label L_stack_ok;
  if (bias == 0) {
    testptr(sp, 2 * wordSize - 1);
  } else {
    // lea(tmp, Address(rsp, bias));
    mov(tmp, sp);
    addptr(tmp, bias);
    testptr(tmp, 2 * wordSize - 1);
  }
  jcc(Assembler::equal, L_stack_ok);
  block_comment(msg);
  stop(msg);
  bind(L_stack_ok);
}
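// Editorial note (illustrative, not part of the original source): the
// SkipIfEqual helper above is scope-based -- code emitted inside the scope
// is jumped over whenever *flag_addr == value at runtime:
//
//   {
//     SkipIfEqual skip(this, &DTraceMethodProbes, false, rscratch1);
//     // ... emitted probe code runs only when DTraceMethodProbes is true
//   }   // the destructor binds the skip label here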
// Implements lightweight-locking.
//
// obj: the object to be locked
// reg_rax: rax
// thread: the thread which attempts to lock obj
// tmp: a temporary register
void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
  assert(reg_rax == rax, "");
  assert_different_registers(basic_lock, obj, reg_rax, thread, tmp);

  Label push;
  const Register top = tmp;

  // Preload the markWord. It is important that this is the first
  // instruction emitted as it is part of C1's null check semantics.
  movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (UseObjectMonitorTable) {
    // Clear cache in case fast locking succeeds.
    movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize(BasicLock::object_monitor_cache_offset_in_bytes())), 0);
  }

  // Load top.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));

  // Check if the lock-stack is full.
  cmpl(top, LockStack::end_offset());
  jcc(Assembler::greaterEqual, slow);

  // Check for recursion.
  cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
  jcc(Assembler::equal, push);

  // Check header for monitor (0b10).
  testptr(reg_rax, markWord::monitor_value);
  jcc(Assembler::notZero, slow);

  // Try to lock. Transition lock bits 0b01 => 0b00
  movptr(tmp, reg_rax);
  andptr(tmp, ~(int32_t)markWord::unlocked_value);
  orptr(reg_rax, markWord::unlocked_value);
  if (EnableValhalla) {
    // Mask inline_type bit such that we go to the slow path if object is an inline type
    andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place));
  }
  lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  jcc(Assembler::notEqual, slow);

  // Restore top, CAS clobbers register.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));

  bind(push);
  // After successful lock, push object on lock-stack.
  movptr(Address(thread, top), obj);
  incrementl(top, oopSize);
  movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
}

// Implements lightweight-unlocking.
//
// obj: the object to be unlocked
// reg_rax: rax
// thread: the thread
// tmp: a temporary register
void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
  assert(reg_rax == rax, "");
  assert_different_registers(obj, reg_rax, thread, tmp);

  Label unlocked, push_and_slow;
  const Register top = tmp;

  // Check if obj is top of lock-stack.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
  cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
  jcc(Assembler::notEqual, slow);

  // Pop lock-stack.
  DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
  subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);

  // Check if recursive.
  cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
  jcc(Assembler::equal, unlocked);

  // Not recursive. Check header for monitor (0b10).
  movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
  testptr(reg_rax, markWord::monitor_value);
  jcc(Assembler::notZero, push_and_slow);

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  testptr(reg_rax, markWord::unlocked_value);
  jcc(Assembler::zero, not_unlocked);
  stop("lightweight_unlock already unlocked");
  bind(not_unlocked);
#endif

  // Try to unlock. Transition lock bits 0b00 => 0b01
  movptr(tmp, reg_rax);
  orptr(tmp, markWord::unlocked_value);
  lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  jcc(Assembler::equal, unlocked);

  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
#ifdef ASSERT
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
  movptr(Address(thread, top), obj);
#endif
  addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
  jmp(slow);

  bind(unlocked);
}
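// Editorial note: the lightweight paths above rely on the two low mark
// word bits that HotSpot uses for locking state:
//
//   mark & 0b11 == 0b01  -> unlocked; locking is the CAS 0b01 -> 0b00
//   mark & 0b11 == 0b00  -> fast-locked via some thread's lock-stack
//   mark & 0b11 == 0b10  -> inflated ObjectMonitor; take the slow path
//
// Recursive acquisitions are represented by pushing the same object on
// the lock-stack again rather than by changing the mark word.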
#ifdef _LP64
// Saves the legacy GPR state on the stack.
void MacroAssembler::save_legacy_gprs() {
  subq(rsp, 16 * wordSize);
  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // Slot 11 * wordSize is left unused; rsp itself is not saved.
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}

// Restores the legacy GPR state from the stack.
void MacroAssembler::restore_legacy_gprs() {
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9,  Address(rsp, 6 * wordSize));
  movq(r8,  Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));
  addq(rsp, 16 * wordSize);
}
#endif