/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "ci/ciInlineKlass.hpp"
#include "crc32c.h"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature_cc.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER2
#include "opto/output.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

static const Assembler::Condition reverse[] = {
    Assembler::noOverflow   /* overflow      = 0x0 */ ,
    Assembler::overflow     /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual   /* carrySet      = 0x2, below         = 0x2 */ ,
    Assembler::below        /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
    Assembler::notZero      /* zero          = 0x4, equal         = 0x4 */ ,
    Assembler::zero         /* notZero       = 0x5, notEqual      = 0x5 */ ,
    Assembler::above        /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual   /* above         = 0x7 */ ,
    Assembler::positive     /* negative      = 0x8 */ ,
    Assembler::negative     /* positive      = 0x9 */ ,
    Assembler::noParity     /* parity        = 0xa */ ,
    Assembler::parity       /* noParity      = 0xb */ ,
    Assembler::greaterEqual /* less          = 0xc */ ,
    Assembler::less         /* greaterEqual  = 0xd */ ,
    Assembler::greater      /* lessEqual     = 0xe */ ,
    Assembler::lessEqual    /* greater       = 0xf */
};


// Implementation of MacroAssembler

// First, all the versions that differ between 32- and 64-bit,
// unless the difference is trivial (a line or so).

#ifndef _LP64

// 32bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}

Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  assert(rscratch == noreg, "");
  return Address::make_array(adr);
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments) {
  call(RuntimeAddress(entry_point));
  increment(rsp, number_of_arguments * wordSize);
}

void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Address src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Register src1, jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::extend_sign(Register hi, Register lo) {
  // According to Intel Doc. AP-526, "Integer Divide", p.18.
  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
    cdql();
  } else {
    movl(hi, lo);
    sarl(hi, 31);
  }
}
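
// Both code paths in extend_sign leave hi:lo holding the sign-extended value
// of lo: cdql sign-extends eax into edx:eax in a single instruction, and the
// generic path copies lo and then replicates its sign bit with an arithmetic
// shift. For example, lo = 0xfffffffe (-2) yields hi = 0xffffffff, so hi:lo
// reads as -2 when treated as a 64-bit value.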

void MacroAssembler::jC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::parity, L);
}

void MacroAssembler::jnC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::noParity, L);
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  assert(rscratch == noreg, "not needed");
  jmp(as_Address(entry, noreg));
}

// Note: y_lo will be destroyed
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  // Long compare for Java (semantics as described in JVM spec.)
  Label high, low, done;

  cmpl(x_hi, y_hi);
  jcc(Assembler::less, low);
  jcc(Assembler::greater, high);
  // x_hi is the return register
  xorl(x_hi, x_hi);
  cmpl(x_lo, y_lo);
  jcc(Assembler::below, low);
  jcc(Assembler::equal, done);

  bind(high);
  xorl(x_hi, x_hi);
  increment(x_hi);
  jmp(done);

  bind(low);
  xorl(x_hi, x_hi);
  decrementl(x_hi);

  bind(done);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  assert(rscratch == noreg, "not needed");

  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t)adr.target(), adr.rspec());
}

void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  //
  // rsp ---> [  ??  ] \               \
  //            ....    | y_rsp_offset  |
  //          [ y_lo ] /  (in bytes)    | x_rsp_offset
  //          [ y_hi ]                  | (in bytes)
  //            ....                    |
  //          [ x_lo ]                 /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);
  orl(rbx, rcx);                                 // rbx = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);                   // if rbx = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);                                    // x_hi * y_lo
  movl(rbx, rax);                                // save lo(x_hi * y_lo) in rbx
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);                                     // x_lo * y_hi
  addl(rbx, rax);                                // add lo(x_lo * y_hi) to rbx
  // 3rd step
  bind(quick);                                   // note: rbx = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);                                    // x_lo * y_lo
  addl(rdx, rbx);                                // correct hi(x_lo * y_lo)
}
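
// The decomposition that lmul relies on, written out in plain C for
// reference (an illustrative sketch only; lmul_ref is not part of the VM):
//
//   uint64_t lmul_ref(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi, uint32_t y_lo) {
//     uint64_t lo_prod = (uint64_t)x_lo * y_lo;      // full 32x32->64 product
//     uint32_t cross   = x_hi * y_lo + x_lo * y_hi;  // only low 32 bits survive
//     return lo_prod + ((uint64_t)cross << 32);      // hi(x_hi * y_lo) etc. overflow out
//   }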

void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);
  negl(hi);
}

void MacroAssembler::lshl(Register hi, Register lo) {
  // Java shift left long support (semantics as described in JVM spec., p.305)
  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  // shift value is in rcx !
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(hi, lo);                                  // x := x << n
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shldl(hi, lo);                                 // x := x << s
  shll(lo);
}

void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(lo, hi);                                  // x := x >> n
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shrdl(lo, hi);                                 // x := x >> s
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}
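
// The double-word shifts above, sketched in plain C (illustrative only), with
// hi:lo holding a 64-bit value and n = BitsPerWord = 32:
//
//   if (s >= n) { hi = lo << (s - n); lo = 0; }                  // lshl
//   else        { hi = (hi << s) | (lo >> (n - s)); lo <<= s; }  // shldl + shll
//
// shldl/shrdl implement the second arm in one instruction, and the hardware
// masks the count in rcx to s mod 32, which is why no explicit subl(s, n) is
// needed on the s >= n path.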

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  movl(as_Address(dst, noreg), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src, noreg));
}

void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  movl(dst, src);
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax,
                             int eip, char* msg) {
  // In order to get locks to work, we need to fake an in_VM state
  JavaThread* thread = JavaThread::current();
  JavaThreadState saved_state = thread->thread_state();
  thread->set_thread_state(_thread_in_vm);
  if (ShowMessageBoxOnError) {
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
  if ((WizardMode || Verbose) && PrintMiscellaneous) {
    tty->cr();
    findpc(eip);
    tty->cr();
  }
#endif
#define PRINT_REG(rax) \
  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
  PRINT_REG(rax);
  PRINT_REG(rbx);
  PRINT_REG(rcx);
  PRINT_REG(rdx);
  PRINT_REG(rdi);
  PRINT_REG(rsi);
  PRINT_REG(rbp);
  PRINT_REG(rsp);
#undef PRINT_REG
  // Print some words near top of stack.
  int* dump_sp = (int*) rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 16; row++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 8; col++) {
      tty->print(" 0x%08x", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)eip-64, (address)eip);
  tty->print_cr("--------");
  Disassembler::decode((address)eip, (address)eip+32);
}

void MacroAssembler::stop(const char* msg) {
  // push address of message
  ExternalAddress message((address)msg);
  pushptr(message.addr(), noreg);
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  // push address of message
  ExternalAddress message((address)msg);
  pushptr(message.addr(), noreg);

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize);       // discard argument
  pop_CPU_state();
}

void MacroAssembler::print_state() {
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers

  push_CPU_state();
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
  pop_CPU_state();

  popa();
  addl(rsp, wordSize);
}
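
// A note on the `{ Label L; call(L, relocInfo::none); bind(L); }` idiom used
// by stop() and print_state() above: calling the immediately following label
// transfers control to the next instruction anyway, but still pushes that
// instruction's address, so the sequence is effectively "push eip" without
// clobbering any register.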
rval"); 497 assert(reachable(adr), "must be"); 498 return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc()); 499 500 } 501 502 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) { 503 AddressLiteral base = adr.base(); 504 lea(rscratch, base); 505 Address index = adr.index(); 506 assert(index._disp == 0, "must not have disp"); // maybe it can? 507 Address array(rscratch, index._index, index._scale, index._disp); 508 return array; 509 } 510 511 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) { 512 Label L, E; 513 514 #ifdef _WIN64 515 // Windows always allocates space for it's register args 516 assert(num_args <= 4, "only register arguments supported"); 517 subq(rsp, frame::arg_reg_save_area_bytes); 518 #endif 519 520 // Align stack if necessary 521 testl(rsp, 15); 522 jcc(Assembler::zero, L); 523 524 subq(rsp, 8); 525 call(RuntimeAddress(entry_point)); 526 addq(rsp, 8); 527 jmp(E); 528 529 bind(L); 530 call(RuntimeAddress(entry_point)); 531 532 bind(E); 533 534 #ifdef _WIN64 535 // restore stack pointer 536 addq(rsp, frame::arg_reg_save_area_bytes); 537 #endif 538 539 } 540 541 void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) { 542 assert(!src2.is_lval(), "should use cmpptr"); 543 assert(rscratch != noreg || always_reachable(src2), "missing"); 544 545 if (reachable(src2)) { 546 cmpq(src1, as_Address(src2)); 547 } else { 548 lea(rscratch, src2); 549 Assembler::cmpq(src1, Address(rscratch, 0)); 550 } 551 } 552 553 int MacroAssembler::corrected_idivq(Register reg) { 554 // Full implementation of Java ldiv and lrem; checks for special 555 // case as described in JVM spec., p.243 & p.271. The function 556 // returns the (pc) offset of the idivl instruction - may be needed 557 // for implicit exceptions. 

void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {
  assert(!src2.is_lval(), "should use cmpptr");
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    Assembler::cmpq(src1, Address(rscratch, 0));
  }
}

int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271. The function
  // returns the (pc) offset of the idivq instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor (may not be eax/edx)     -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}
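
// Why corrected_idivq needs the special case: min_long / -1 mathematically
// equals 2^63, which is not representable in 64 bits, and idivq would raise a
// division error (#DE) rather than wrap. The JVM spec instead defines
//   min_long / -1 == min_long   and   min_long % -1 == 0,
// which is exactly what the early exit above produces without running idivq.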

void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}

void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}

void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    incrementq(as_Address(dst));
  } else {
    lea(rscratch, dst);
    incrementq(Address(rscratch, 0));
  }
}

void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}

void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  lea(rscratch, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch;
  jmp(dispatch);
}

void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  lea(rscratch, adr);
  movptr(dst, rscratch);
}

void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_int8((unsigned char)0xC9); // LEAVE
}

void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(dst, src);
      movq(dst, Address(dst, 0));
    }
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  movq(as_Address(dst, rscratch), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src, dst /*rscratch*/));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  if (is_simm32(src)) {
    movptr(dst, checked_cast<int32_t>(src));
  } else {
    mov64(rscratch, src);
    movq(dst, rscratch);
  }
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  movoop(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  mov_metadata(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  lea(rscratch, src);
  if (src.is_lval()) {
    push(rscratch);
  } else {
    pushq(Address(rscratch, 0));
  }
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  reset_last_Java_frame(r15_thread, clear_fp);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register rscratch) {
  set_last_Java_frame(r15_thread, last_java_sp, last_java_fp, last_java_pc, rscratch);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::stop(const char* msg) {
  if (ShowMessageBoxOnError) {
    address rip = pc();
    pusha();            // get regs on stack
    lea(c_rarg1, InternalAddress(rip));
    movq(c_rarg2, rsp); // pass pointer to regs array
  }
  lea(c_rarg0, ExternalAddress((address) msg));
  andq(rsp, -16);       // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, ExternalAddress((address) msg));
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();            // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  // rsp is actually not stored by pusha(); compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  PRINT_REG(rsp, (intptr_t)(&regs[16]));
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
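  // The regs[] indices above decode the block laid down by pusha(): registers
  // are pushed in the order rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi, r8 .. r15,
  // so the last value pushed (r15) ends up at regs[0], rax at regs[15], and
  // &regs[16] is the value rsp had before the pusha.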
  // Print some words near the top of the stack.
  int64_t* rsp = &regs[16];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
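
// In reg2offset_in, the "+ 4" skips the two machine words that sit between
// the incoming stack arguments and rbp: the saved rbp and the return address.
// With VMRegImpl::stack_slot_size == 4, each 8-byte word is two slots, so two
// words make four slots.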

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMregpair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
             src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}

// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMregpair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}

// A float arg may have to do float reg <-> int reg conversion
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMregpair is either
  // all really one physical register or adjacent stack slots.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}
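
// long_move, double_move and float_move above, and move32_64 below, all
// follow the same four-way pattern: reg -> reg, reg -> stack, stack -> reg,
// and stack -> stack, where the last case has to bounce through the caller-
// supplied tmp register because x86 has no memory-to-memory move.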

// On 64 bit we will store integer like items to the stack as
// 64 bits items (x86_32/64 abi) even though java would only store
// 32bits for a parameter. On 32bit it will simply be 32 bits.
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movq(rax, Address(rbp, reg2offset_in(src.first())));
      movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
void MacroAssembler::object_move(OopMap* map,
                                 int oop_handle_offset,
                                 int framesize_in_slots,
                                 VMRegPair src,
                                 VMRegPair dst,
                                 bool is_receiver,
                                 int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if oop is null; if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
    lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a null
    cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmpptr(rOop, NULL_WORD);
    lea(rHandle, Address(rsp, offset));
    // conditionally move a null from the handle area where it was just stored
    cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it otherwise it is already in correct reg.
  if (dst.first()->is_stack()) {
    movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}

#endif // _LP64
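
// A note on the handle trick in object_move: JNI code receives object
// arguments as jobject handles (pointers to oop slots), never as raw oops.
// Passing the address of a stack slot keeps the oop itself visible to the GC
// via the oop map, and the conditional move turns a null oop into a null
// handle, as JNI requires.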

// Now versions that are common to 32/64 bit

void MacroAssembler::addptr(Register dst, int32_t imm32) {
  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
}

void MacroAssembler::addptr(Register dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addptr(Address dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    addss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    addss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addpd(dst, Address(rscratch, 0));
  }
}
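
// The AddressLiteral overloads above follow a pattern that recurs throughout
// this file: assert always_reachable() when no scratch register is supplied,
// then either address the operand RIP-relatively or materialize the address
// with lea. The split exists because RIP-relative addressing only spans a
// +/- 2GB range, so far-away targets must go through a scratch register.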

// See 8273459. Function for ensuring 64-byte alignment, intended for stubs only.
// Stub code is generated once and never copied.
// NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
void MacroAssembler::align64() {
  align(64, (uint)(uintptr_t)pc());
}

void MacroAssembler::align32() {
  align(32, (uint)(uintptr_t)pc());
}

void MacroAssembler::align(uint modulus) {
  // 8273459: Ensure alignment is possible with current segment alignment
  assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
  align(modulus, offset());
}

void MacroAssembler::align(uint modulus, uint target) {
  if (target % modulus != 0) {
    nop(modulus - (target % modulus));
  }
}

void MacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}

void MacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void MacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}

void MacroAssembler::pop_d(XMMRegister r) {
  movdbl(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andpd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andps(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
}

#ifdef _LP64
void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    andq(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    andq(dst, Address(rscratch, 0));
  }
}
#endif

void MacroAssembler::atomic_incl(Address counter_addr) {
  lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incl(Address(rscratch, 0));
  }
}

#ifdef _LP64
void MacroAssembler::atomic_incq(Address counter_addr) {
  lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incq(Address(rscratch, 0));
  }
}
#endif
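
// atomic_incl/atomic_incq rely on the lock prefix to make the increment's
// read-modify-write cycle atomic across cores; without it two threads could
// read the same counter value and one of the two updates would be lost.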

// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-(int)os::vm_page_size())), size );
  subptr(tmp, (int)os::vm_page_size());
  subl(size, (int)os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down including all pages in the shadow zone.
  for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
    // this could be any sized move but this can be a debugging crumb
    // so the bigger the better.
    movptr(Address(tmp, (-i*(int)os::vm_page_size())), size );
  }
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(get_thread(rsi);)

  cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
  jcc(Assembler::below, no_reserved_zone_enabling);

  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
  jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);
  setb(Assembler::notZero, x);
}

// Wouldn't need if AddressLiteral version had new name
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}

void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}

void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
  assert(rscratch != noreg || always_reachable(entry), "missing");

  if (reachable(entry)) {
    Assembler::call_literal(entry.target(), entry.rspec());
  } else {
    lea(rscratch, entry);
    Assembler::call(rscratch);
  }
}

void MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
#ifdef _LP64
  // Needs full 64-bit immediate for later patching.
  mov64(rax, (int64_t)Universe::non_oop_word());
#else
  movptr(rax, (intptr_t)Universe::non_oop_word());
#endif
  call(AddressLiteral(entry, rh));
}

int MacroAssembler::ic_check_size() {
  return LP64_ONLY(14) NOT_LP64(12);
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
  Register data = rax;
  Register temp = LP64_ONLY(rscratch1) NOT_LP64(rbx);

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompressedClassPointers) {
    movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  } else {
    movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
  }

  // if inline cache check fails, then jump to runtime routine
  jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}
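
// ic_check() emits the unverified entry point (UEP) used by inline caches:
// it loads the receiver's klass and compares it against the klass the call
// site's CompiledICData speculates on. On a mismatch, control goes to the
// ic-miss stub, which re-resolves the call; on a match, execution falls
// through to the verified entry point (VEP) that follows.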

void MacroAssembler::emit_static_call_stub() {
  // Static stub relocation also tags the Method* in the code-stream.
  mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time.
  // This is recognized as unresolved by relocs/nativeinst/ic code.
  jump(RuntimeAddress(pc()));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));

  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}
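
// The call_VM variants above share a small trampoline trick: `call C` both
// enters the out-of-line block and deposits a return address on the stack,
// which call_VM_helper below uses to derive last_Java_pc; the `ret(0)` then
// resumes at E as if the whole thing had been a plain call.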

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   int number_of_arguments,
                                   bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   bool check_exceptions) {
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   bool check_exceptions) {

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   Register arg_3,
                                   bool check_exceptions) {
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifdef _LP64
    java_thread = r15_thread;
#else
    java_thread = rdi;
    get_thread(java_thread);
#endif // LP64
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // r12 is the heapbase.
  LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
#endif // ASSERT

  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  NOT_LP64(push(java_thread); number_of_arguments++);
  LP64_ONLY(mov(c_rarg0, r15_thread));

  // set last Java frame before call
  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(java_thread, last_java_sp, rbp, nullptr, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

  // restore the thread (cannot use the pushed argument since arguments
  // may be overwritten by C code generated by an optimizing compiler);
  // however can use the register value directly if it is callee saved.
  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
    // rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
    guarantee(java_thread != rax, "change this code");
    push(rax);
    { Label L;
      get_thread(rax);
      cmpptr(java_thread, rax);
      jcc(Assembler::equal, L);
      STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
      bind(L);
    }
    pop(rax);
#endif
  } else {
    get_thread(java_thread);
  }
  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(java_thread, true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
#ifndef _LP64
    jump_cc(Assembler::notEqual,
            RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
#endif // LP64
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  // Calculating the value for last_Java_sp is somewhat subtle. call_VM does an
  // intermediate call which places a return address on the stack just under the
  // stack pointer as the user finished with it. This allows us to retrieve
  // last_Java_pc from last_Java_sp[-1].
  // On 32bit we then have to push additional args on the stack to accomplish
  // the actual requested call. On 64bit call_VM only can use register args
  // so the only extra space is the return address that call_VM created.
  // This hopefully explains the calculations here.

#ifdef _LP64
  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));
#else
  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
#endif // LP64

  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
}
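
// Stack layout at the point call_VM_helper computes last_Java_sp, 64-bit
// case (on 32-bit, the pushed arguments also sit below the return address,
// hence the (1 + number_of_arguments) * wordSize adjustment):
//
//   rsp ->  [ return address ]   <- pushed by the call in call_VM
//           [ caller's frame ]   <- rsp + wordSize == last_Java_sp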
void MacroAssembler::call_VM_leaf0(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 0);
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  // all four arguments were passed, so pop four words on 32-bit
  call_VM_leaf(entry_point, 4);
}

void MacroAssembler::super_call_VM_leaf(address entry_point) {
  // no arguments were passed, so there is nothing to pop
  MacroAssembler::call_VM_leaf_base(entry_point, 0);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 4);
}

void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  movptr(metadata_result,
Address(java_thread, JavaThread::vm_result_2_offset())); 1772 movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD); 1773 } 1774 1775 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { 1776 } 1777 1778 void MacroAssembler::check_and_handle_popframe(Register java_thread) { 1779 } 1780 1781 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) { 1782 assert(rscratch != noreg || always_reachable(src1), "missing"); 1783 1784 if (reachable(src1)) { 1785 cmpl(as_Address(src1), imm); 1786 } else { 1787 lea(rscratch, src1); 1788 cmpl(Address(rscratch, 0), imm); 1789 } 1790 } 1791 1792 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) { 1793 assert(!src2.is_lval(), "use cmpptr"); 1794 assert(rscratch != noreg || always_reachable(src2), "missing"); 1795 1796 if (reachable(src2)) { 1797 cmpl(src1, as_Address(src2)); 1798 } else { 1799 lea(rscratch, src2); 1800 cmpl(src1, Address(rscratch, 0)); 1801 } 1802 } 1803 1804 void MacroAssembler::cmp32(Register src1, int32_t imm) { 1805 Assembler::cmpl(src1, imm); 1806 } 1807 1808 void MacroAssembler::cmp32(Register src1, Address src2) { 1809 Assembler::cmpl(src1, src2); 1810 } 1811 1812 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1813 ucomisd(opr1, opr2); 1814 1815 Label L; 1816 if (unordered_is_less) { 1817 movl(dst, -1); 1818 jcc(Assembler::parity, L); 1819 jcc(Assembler::below , L); 1820 movl(dst, 0); 1821 jcc(Assembler::equal , L); 1822 increment(dst); 1823 } else { // unordered is greater 1824 movl(dst, 1); 1825 jcc(Assembler::parity, L); 1826 jcc(Assembler::above , L); 1827 movl(dst, 0); 1828 jcc(Assembler::equal , L); 1829 decrementl(dst); 1830 } 1831 bind(L); 1832 } 1833 1834 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1835 ucomiss(opr1, opr2); 1836 1837 Label L; 1838 if (unordered_is_less) { 1839 movl(dst, -1); 1840 jcc(Assembler::parity, L); 1841 jcc(Assembler::below , L); 1842 movl(dst, 0); 1843 jcc(Assembler::equal , L); 1844 increment(dst); 1845 } else { // unordered is greater 1846 movl(dst, 1); 1847 jcc(Assembler::parity, L); 1848 jcc(Assembler::above , L); 1849 movl(dst, 0); 1850 jcc(Assembler::equal , L); 1851 decrementl(dst); 1852 } 1853 bind(L); 1854 } 1855 1856 1857 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) { 1858 assert(rscratch != noreg || always_reachable(src1), "missing"); 1859 1860 if (reachable(src1)) { 1861 cmpb(as_Address(src1), imm); 1862 } else { 1863 lea(rscratch, src1); 1864 cmpb(Address(rscratch, 0), imm); 1865 } 1866 } 1867 1868 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) { 1869 #ifdef _LP64 1870 assert(rscratch != noreg || always_reachable(src2), "missing"); 1871 1872 if (src2.is_lval()) { 1873 movptr(rscratch, src2); 1874 Assembler::cmpq(src1, rscratch); 1875 } else if (reachable(src2)) { 1876 cmpq(src1, as_Address(src2)); 1877 } else { 1878 lea(rscratch, src2); 1879 Assembler::cmpq(src1, Address(rscratch, 0)); 1880 } 1881 #else 1882 assert(rscratch == noreg, "not needed"); 1883 if (src2.is_lval()) { 1884 cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); 1885 } else { 1886 cmpl(src1, as_Address(src2)); 1887 } 1888 #endif // _LP64 1889 } 1890 1891 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) { 1892 assert(src2.is_lval(), "not a mem-mem compare"); 1893 #ifdef _LP64 1894 // moves src2's 
literal address 1895 movptr(rscratch, src2); 1896 Assembler::cmpq(src1, rscratch); 1897 #else 1898 assert(rscratch == noreg, "not needed"); 1899 cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); 1900 #endif // _LP64 1901 } 1902 1903 void MacroAssembler::cmpoop(Register src1, Register src2) { 1904 cmpptr(src1, src2); 1905 } 1906 1907 void MacroAssembler::cmpoop(Register src1, Address src2) { 1908 cmpptr(src1, src2); 1909 } 1910 1911 #ifdef _LP64 1912 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) { 1913 movoop(rscratch, src2); 1914 cmpptr(src1, rscratch); 1915 } 1916 #endif 1917 1918 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) { 1919 assert(rscratch != noreg || always_reachable(adr), "missing"); 1920 1921 if (reachable(adr)) { 1922 lock(); 1923 cmpxchgptr(reg, as_Address(adr)); 1924 } else { 1925 lea(rscratch, adr); 1926 lock(); 1927 cmpxchgptr(reg, Address(rscratch, 0)); 1928 } 1929 } 1930 1931 void MacroAssembler::cmpxchgptr(Register reg, Address adr) { 1932 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr)); 1933 } 1934 1935 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1936 assert(rscratch != noreg || always_reachable(src), "missing"); 1937 1938 if (reachable(src)) { 1939 Assembler::comisd(dst, as_Address(src)); 1940 } else { 1941 lea(rscratch, src); 1942 Assembler::comisd(dst, Address(rscratch, 0)); 1943 } 1944 } 1945 1946 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 1947 assert(rscratch != noreg || always_reachable(src), "missing"); 1948 1949 if (reachable(src)) { 1950 Assembler::comiss(dst, as_Address(src)); 1951 } else { 1952 lea(rscratch, src); 1953 Assembler::comiss(dst, Address(rscratch, 0)); 1954 } 1955 } 1956 1957 1958 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) { 1959 assert(rscratch != noreg || always_reachable(counter_addr), "missing"); 1960 1961 Condition negated_cond = negate_condition(cond); 1962 Label L; 1963 jcc(negated_cond, L); 1964 pushf(); // Preserve flags 1965 atomic_incl(counter_addr, rscratch); 1966 popf(); 1967 bind(L); 1968 } 1969 1970 int MacroAssembler::corrected_idivl(Register reg) { 1971 // Full implementation of Java idiv and irem; checks for 1972 // special case as described in JVM spec., p.243 & p.271. 1973 // The function returns the (pc) offset of the idivl 1974 // instruction - may be needed for implicit exceptions. 
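  // (idivl raises #DE both for a zero divisor and for min_int / -1, whose
  // quotient 2^31 is unrepresentable; the JVM spec instead defines
  // min_int / -1 == min_int with a remainder of 0, hence the special case.)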
1975 // 1976 // normal case special case 1977 // 1978 // input : rax,: dividend min_int 1979 // reg: divisor (may not be rax,/rdx) -1 1980 // 1981 // output: rax,: quotient (= rax, idiv reg) min_int 1982 // rdx: remainder (= rax, irem reg) 0 1983 assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register"); 1984 const int min_int = 0x80000000; 1985 Label normal_case, special_case; 1986 1987 // check for special case 1988 cmpl(rax, min_int); 1989 jcc(Assembler::notEqual, normal_case); 1990 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0) 1991 cmpl(reg, -1); 1992 jcc(Assembler::equal, special_case); 1993 1994 // handle normal case 1995 bind(normal_case); 1996 cdql(); 1997 int idivl_offset = offset(); 1998 idivl(reg); 1999 2000 // normal and special case exit 2001 bind(special_case); 2002 2003 return idivl_offset; 2004 } 2005 2006 2007 2008 void MacroAssembler::decrementl(Register reg, int value) { 2009 if (value == min_jint) {subl(reg, value) ; return; } 2010 if (value < 0) { incrementl(reg, -value); return; } 2011 if (value == 0) { ; return; } 2012 if (value == 1 && UseIncDec) { decl(reg) ; return; } 2013 /* else */ { subl(reg, value) ; return; } 2014 } 2015 2016 void MacroAssembler::decrementl(Address dst, int value) { 2017 if (value == min_jint) {subl(dst, value) ; return; } 2018 if (value < 0) { incrementl(dst, -value); return; } 2019 if (value == 0) { ; return; } 2020 if (value == 1 && UseIncDec) { decl(dst) ; return; } 2021 /* else */ { subl(dst, value) ; return; } 2022 } 2023 2024 void MacroAssembler::division_with_shift (Register reg, int shift_value) { 2025 assert(shift_value > 0, "illegal shift value"); 2026 Label _is_positive; 2027 testl (reg, reg); 2028 jcc (Assembler::positive, _is_positive); 2029 int offset = (1 << shift_value) - 1 ; 2030 2031 if (offset == 1) { 2032 incrementl(reg); 2033 } else { 2034 addl(reg, offset); 2035 } 2036 2037 bind (_is_positive); 2038 sarl(reg, shift_value); 2039 } 2040 2041 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2042 assert(rscratch != noreg || always_reachable(src), "missing"); 2043 2044 if (reachable(src)) { 2045 Assembler::divsd(dst, as_Address(src)); 2046 } else { 2047 lea(rscratch, src); 2048 Assembler::divsd(dst, Address(rscratch, 0)); 2049 } 2050 } 2051 2052 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2053 assert(rscratch != noreg || always_reachable(src), "missing"); 2054 2055 if (reachable(src)) { 2056 Assembler::divss(dst, as_Address(src)); 2057 } else { 2058 lea(rscratch, src); 2059 Assembler::divss(dst, Address(rscratch, 0)); 2060 } 2061 } 2062 2063 void MacroAssembler::enter() { 2064 push(rbp); 2065 mov(rbp, rsp); 2066 } 2067 2068 void MacroAssembler::post_call_nop() { 2069 if (!Continuations::enabled()) { 2070 return; 2071 } 2072 InstructionMark im(this); 2073 relocate(post_call_nop_Relocation::spec()); 2074 InlineSkippedInstructionsCounter skipCounter(this); 2075 emit_int8((uint8_t)0x0f); 2076 emit_int8((uint8_t)0x1f); 2077 emit_int8((uint8_t)0x84); 2078 emit_int8((uint8_t)0x00); 2079 emit_int32(0x00); 2080 } 2081 2082 // A 5 byte nop that is safe for patching (see patch_verified_entry) 2083 void MacroAssembler::fat_nop() { 2084 if (UseAddressNop) { 2085 addr_nop_5(); 2086 } else { 2087 emit_int8((uint8_t)0x26); // es: 2088 emit_int8((uint8_t)0x2e); // cs: 2089 emit_int8((uint8_t)0x64); // fs: 2090 emit_int8((uint8_t)0x65); // gs: 2091 emit_int8((uint8_t)0x90); 2092 } 2093 } 2094 2095 #ifndef _LP64 2096 void 
MacroAssembler::fcmp(Register tmp) { 2097 fcmp(tmp, 1, true, true); 2098 } 2099 2100 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) { 2101 assert(!pop_right || pop_left, "usage error"); 2102 if (VM_Version::supports_cmov()) { 2103 assert(tmp == noreg, "unneeded temp"); 2104 if (pop_left) { 2105 fucomip(index); 2106 } else { 2107 fucomi(index); 2108 } 2109 if (pop_right) { 2110 fpop(); 2111 } 2112 } else { 2113 assert(tmp != noreg, "need temp"); 2114 if (pop_left) { 2115 if (pop_right) { 2116 fcompp(); 2117 } else { 2118 fcomp(index); 2119 } 2120 } else { 2121 fcom(index); 2122 } 2123 // convert FPU condition into eflags condition via rax, 2124 save_rax(tmp); 2125 fwait(); fnstsw_ax(); 2126 sahf(); 2127 restore_rax(tmp); 2128 } 2129 // condition codes set as follows: 2130 // 2131 // CF (corresponds to C0) if x < y 2132 // PF (corresponds to C2) if unordered 2133 // ZF (corresponds to C3) if x = y 2134 } 2135 2136 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) { 2137 fcmp2int(dst, unordered_is_less, 1, true, true); 2138 } 2139 2140 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) { 2141 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right); 2142 Label L; 2143 if (unordered_is_less) { 2144 movl(dst, -1); 2145 jcc(Assembler::parity, L); 2146 jcc(Assembler::below , L); 2147 movl(dst, 0); 2148 jcc(Assembler::equal , L); 2149 increment(dst); 2150 } else { // unordered is greater 2151 movl(dst, 1); 2152 jcc(Assembler::parity, L); 2153 jcc(Assembler::above , L); 2154 movl(dst, 0); 2155 jcc(Assembler::equal , L); 2156 decrementl(dst); 2157 } 2158 bind(L); 2159 } 2160 2161 void MacroAssembler::fld_d(AddressLiteral src) { 2162 fld_d(as_Address(src)); 2163 } 2164 2165 void MacroAssembler::fld_s(AddressLiteral src) { 2166 fld_s(as_Address(src)); 2167 } 2168 2169 void MacroAssembler::fldcw(AddressLiteral src) { 2170 fldcw(as_Address(src)); 2171 } 2172 2173 void MacroAssembler::fpop() { 2174 ffree(); 2175 fincstp(); 2176 } 2177 2178 void MacroAssembler::fremr(Register tmp) { 2179 save_rax(tmp); 2180 { Label L; 2181 bind(L); 2182 fprem(); 2183 fwait(); fnstsw_ax(); 2184 sahf(); 2185 jcc(Assembler::parity, L); 2186 } 2187 restore_rax(tmp); 2188 // Result is in ST0. 
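  // (fprem computes only a partial remainder, reducing the exponent
  // difference by at most 63 bits per iteration; the FPU keeps C2 set --
  // mapped onto the parity flag by sahf -- until the reduction is complete,
  // which is why the loop above retries on parity.)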
2189 // Note: fxch & fpop to get rid of ST1 2190 // (otherwise FPU stack could overflow eventually) 2191 fxch(1); 2192 fpop(); 2193 } 2194 2195 void MacroAssembler::empty_FPU_stack() { 2196 if (VM_Version::supports_mmx()) { 2197 emms(); 2198 } else { 2199 for (int i = 8; i-- > 0; ) ffree(i); 2200 } 2201 } 2202 #endif // !LP64 2203 2204 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2205 assert(rscratch != noreg || always_reachable(src), "missing"); 2206 if (reachable(src)) { 2207 Assembler::mulpd(dst, as_Address(src)); 2208 } else { 2209 lea(rscratch, src); 2210 Assembler::mulpd(dst, Address(rscratch, 0)); 2211 } 2212 } 2213 2214 void MacroAssembler::load_float(Address src) { 2215 #ifdef _LP64 2216 movflt(xmm0, src); 2217 #else 2218 if (UseSSE >= 1) { 2219 movflt(xmm0, src); 2220 } else { 2221 fld_s(src); 2222 } 2223 #endif // LP64 2224 } 2225 2226 void MacroAssembler::store_float(Address dst) { 2227 #ifdef _LP64 2228 movflt(dst, xmm0); 2229 #else 2230 if (UseSSE >= 1) { 2231 movflt(dst, xmm0); 2232 } else { 2233 fstp_s(dst); 2234 } 2235 #endif // LP64 2236 } 2237 2238 void MacroAssembler::load_double(Address src) { 2239 #ifdef _LP64 2240 movdbl(xmm0, src); 2241 #else 2242 if (UseSSE >= 2) { 2243 movdbl(xmm0, src); 2244 } else { 2245 fld_d(src); 2246 } 2247 #endif // LP64 2248 } 2249 2250 void MacroAssembler::store_double(Address dst) { 2251 #ifdef _LP64 2252 movdbl(dst, xmm0); 2253 #else 2254 if (UseSSE >= 2) { 2255 movdbl(dst, xmm0); 2256 } else { 2257 fstp_d(dst); 2258 } 2259 #endif // LP64 2260 } 2261 2262 // dst = c = a * b + c 2263 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 2264 Assembler::vfmadd231sd(c, a, b); 2265 if (dst != c) { 2266 movdbl(dst, c); 2267 } 2268 } 2269 2270 // dst = c = a * b + c 2271 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 2272 Assembler::vfmadd231ss(c, a, b); 2273 if (dst != c) { 2274 movflt(dst, c); 2275 } 2276 } 2277 2278 // dst = c = a * b + c 2279 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 2280 Assembler::vfmadd231pd(c, a, b, vector_len); 2281 if (dst != c) { 2282 vmovdqu(dst, c); 2283 } 2284 } 2285 2286 // dst = c = a * b + c 2287 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 2288 Assembler::vfmadd231ps(c, a, b, vector_len); 2289 if (dst != c) { 2290 vmovdqu(dst, c); 2291 } 2292 } 2293 2294 // dst = c = a * b + c 2295 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 2296 Assembler::vfmadd231pd(c, a, b, vector_len); 2297 if (dst != c) { 2298 vmovdqu(dst, c); 2299 } 2300 } 2301 2302 // dst = c = a * b + c 2303 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 2304 Assembler::vfmadd231ps(c, a, b, vector_len); 2305 if (dst != c) { 2306 vmovdqu(dst, c); 2307 } 2308 } 2309 2310 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) { 2311 assert(rscratch != noreg || always_reachable(dst), "missing"); 2312 2313 if (reachable(dst)) { 2314 incrementl(as_Address(dst)); 2315 } else { 2316 lea(rscratch, dst); 2317 incrementl(Address(rscratch, 0)); 2318 } 2319 } 2320 2321 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) { 2322 incrementl(as_Address(dst, rscratch)); 2323 } 2324 2325 void MacroAssembler::incrementl(Register reg, int value) { 2326 if (value == min_jint) 
{addl(reg, value) ; return; } 2327 if (value < 0) { decrementl(reg, -value); return; } 2328 if (value == 0) { ; return; } 2329 if (value == 1 && UseIncDec) { incl(reg) ; return; } 2330 /* else */ { addl(reg, value) ; return; } 2331 } 2332 2333 void MacroAssembler::incrementl(Address dst, int value) { 2334 if (value == min_jint) {addl(dst, value) ; return; } 2335 if (value < 0) { decrementl(dst, -value); return; } 2336 if (value == 0) { ; return; } 2337 if (value == 1 && UseIncDec) { incl(dst) ; return; } 2338 /* else */ { addl(dst, value) ; return; } 2339 } 2340 2341 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) { 2342 assert(rscratch != noreg || always_reachable(dst), "missing"); 2343 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump"); 2344 if (reachable(dst)) { 2345 jmp_literal(dst.target(), dst.rspec()); 2346 } else { 2347 lea(rscratch, dst); 2348 jmp(rscratch); 2349 } 2350 } 2351 2352 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) { 2353 assert(rscratch != noreg || always_reachable(dst), "missing"); 2354 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump_cc"); 2355 if (reachable(dst)) { 2356 InstructionMark im(this); 2357 relocate(dst.reloc()); 2358 const int short_size = 2; 2359 const int long_size = 6; 2360 int offs = (intptr_t)dst.target() - ((intptr_t)pc()); 2361 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) { 2362 // 0111 tttn #8-bit disp 2363 emit_int8(0x70 | cc); 2364 emit_int8((offs - short_size) & 0xFF); 2365 } else { 2366 // 0000 1111 1000 tttn #32-bit disp 2367 emit_int8(0x0F); 2368 emit_int8((unsigned char)(0x80 | cc)); 2369 emit_int32(offs - long_size); 2370 } 2371 } else { 2372 #ifdef ASSERT 2373 warning("reversing conditional branch"); 2374 #endif /* ASSERT */ 2375 Label skip; 2376 jccb(reverse[cc], skip); 2377 lea(rscratch, dst); 2378 Assembler::jmp(rscratch); 2379 bind(skip); 2380 } 2381 } 2382 2383 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) { 2384 assert(rscratch != noreg || always_reachable(src), "missing"); 2385 2386 if (reachable(src)) { 2387 Assembler::ldmxcsr(as_Address(src)); 2388 } else { 2389 lea(rscratch, src); 2390 Assembler::ldmxcsr(Address(rscratch, 0)); 2391 } 2392 } 2393 2394 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2395 int off; 2396 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 2397 off = offset(); 2398 movsbl(dst, src); // movsxb 2399 } else { 2400 off = load_unsigned_byte(dst, src); 2401 shll(dst, 24); 2402 sarl(dst, 24); 2403 } 2404 return off; 2405 } 2406 2407 // Note: load_signed_short used to be called load_signed_word. 2408 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler 2409 // manual, which means 16 bits, that usage is found nowhere in HotSpot code. 2410 // The term "word" in HotSpot means a 32- or 64-bit machine word. 2411 int MacroAssembler::load_signed_short(Register dst, Address src) { 2412 int off; 2413 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 2414 // This is dubious to me since it seems safe to do a signed 16 => 64 bit 2415 // version but this is what 64bit has always done. This seems to imply 2416 // that users are only using 32bits worth. 
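  // For illustration, the two shapes emitted below are roughly:
  //   movswl dst, src     ; P6 and 64-bit: a single sign-extending load
  // versus the pre-P6 fallback:
  //   movzwl dst, src
  //   shll   dst, 16
  //   sarl   dst, 16      ; the shift pair performs the sign extension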
    off = offset();
    movswl(dst, src); // movsxw
  } else {
    off = load_unsigned_short(dst, src);
    shll(dst, 16);
    sarl(dst, 16);
  }
  return off;
}

int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
  // and "3.9 Partial Register Penalties", p. 22.
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzbl(dst, src); // movzxb
  } else {
    xorl(dst, dst);
    off = offset();
    movb(dst, src);
  }
  return off;
}

// Note: load_unsigned_short used to be called load_unsigned_word.
int MacroAssembler::load_unsigned_short(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
  // and "3.9 Partial Register Penalties", p. 22.
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzwl(dst, src); // movzxw
  } else {
    xorl(dst, dst);
    off = offset();
    movw(dst, src);
  }
  return off;
}

void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
  switch (size_in_bytes) {
#ifndef _LP64
  case 8:
    assert(dst2 != noreg, "second dest register required");
    movl(dst,  src);
    movl(dst2, src.plus_disp(BytesPerInt));
    break;
#else
  case 8: movq(dst, src); break;
#endif
  case 4: movl(dst, src); break;
  case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
  case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
  default: ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
  switch (size_in_bytes) {
#ifndef _LP64
  case 8:
    assert(src2 != noreg, "second source register required");
    movl(dst,                        src);
    movl(dst.plus_disp(BytesPerInt), src2);
    break;
#else
  case 8: movq(dst, src); break;
#endif
  case 4: movl(dst, src); break;
  case 2: movw(dst, src); break;
  case 1: movb(dst, src); break;
  default: ShouldNotReachHere();
  }
}

void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    movl(as_Address(dst), src);
  } else {
    lea(rscratch, dst);
    movl(Address(rscratch, 0), src);
  }
}

void MacroAssembler::mov32(Register dst, AddressLiteral src) {
  if (reachable(src)) {
    movl(dst, as_Address(src));
  } else {
    lea(dst, src);
    movl(dst, Address(dst, 0));
  }
}

// C++ bool manipulation

void MacroAssembler::movbool(Register dst, Address src) {
  if (sizeof(bool) == 1)
    movb(dst, src);
  else if (sizeof(bool) == 2)
    movw(dst, src);
  else if (sizeof(bool) == 4)
    movl(dst, src);
  else
    // unsupported
    ShouldNotReachHere();
}

void MacroAssembler::movbool(Address dst, bool boolconst) {
  if (sizeof(bool) == 1)
    movb(dst, (int) boolconst);
  else if (sizeof(bool) == 2)
    movw(dst, (int) boolconst);
  else if (sizeof(bool) == 4)
    movl(dst, (int) boolconst);
  else
    // unsupported
    ShouldNotReachHere();
}

void
MacroAssembler::movbool(Address dst, Register src) { 2541 if(sizeof(bool) == 1) 2542 movb(dst, src); 2543 else if(sizeof(bool) == 2) 2544 movw(dst, src); 2545 else if(sizeof(bool) == 4) 2546 movl(dst, src); 2547 else 2548 // unsupported 2549 ShouldNotReachHere(); 2550 } 2551 2552 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) { 2553 assert(rscratch != noreg || always_reachable(src), "missing"); 2554 2555 if (reachable(src)) { 2556 movdl(dst, as_Address(src)); 2557 } else { 2558 lea(rscratch, src); 2559 movdl(dst, Address(rscratch, 0)); 2560 } 2561 } 2562 2563 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) { 2564 assert(rscratch != noreg || always_reachable(src), "missing"); 2565 2566 if (reachable(src)) { 2567 movq(dst, as_Address(src)); 2568 } else { 2569 lea(rscratch, src); 2570 movq(dst, Address(rscratch, 0)); 2571 } 2572 } 2573 2574 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) { 2575 assert(rscratch != noreg || always_reachable(src), "missing"); 2576 2577 if (reachable(src)) { 2578 if (UseXmmLoadAndClearUpper) { 2579 movsd (dst, as_Address(src)); 2580 } else { 2581 movlpd(dst, as_Address(src)); 2582 } 2583 } else { 2584 lea(rscratch, src); 2585 if (UseXmmLoadAndClearUpper) { 2586 movsd (dst, Address(rscratch, 0)); 2587 } else { 2588 movlpd(dst, Address(rscratch, 0)); 2589 } 2590 } 2591 } 2592 2593 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) { 2594 assert(rscratch != noreg || always_reachable(src), "missing"); 2595 2596 if (reachable(src)) { 2597 movss(dst, as_Address(src)); 2598 } else { 2599 lea(rscratch, src); 2600 movss(dst, Address(rscratch, 0)); 2601 } 2602 } 2603 2604 void MacroAssembler::movptr(Register dst, Register src) { 2605 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2606 } 2607 2608 void MacroAssembler::movptr(Register dst, Address src) { 2609 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2610 } 2611 2612 // src should NEVER be a real pointer. 
Use AddressLiteral for true pointers 2613 void MacroAssembler::movptr(Register dst, intptr_t src) { 2614 #ifdef _LP64 2615 if (is_uimm32(src)) { 2616 movl(dst, checked_cast<uint32_t>(src)); 2617 } else if (is_simm32(src)) { 2618 movq(dst, checked_cast<int32_t>(src)); 2619 } else { 2620 mov64(dst, src); 2621 } 2622 #else 2623 movl(dst, src); 2624 #endif 2625 } 2626 2627 void MacroAssembler::movptr(Address dst, Register src) { 2628 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2629 } 2630 2631 void MacroAssembler::movptr(Address dst, int32_t src) { 2632 LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); 2633 } 2634 2635 void MacroAssembler::movdqu(Address dst, XMMRegister src) { 2636 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2637 Assembler::movdqu(dst, src); 2638 } 2639 2640 void MacroAssembler::movdqu(XMMRegister dst, Address src) { 2641 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2642 Assembler::movdqu(dst, src); 2643 } 2644 2645 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) { 2646 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2647 Assembler::movdqu(dst, src); 2648 } 2649 2650 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2651 assert(rscratch != noreg || always_reachable(src), "missing"); 2652 2653 if (reachable(src)) { 2654 movdqu(dst, as_Address(src)); 2655 } else { 2656 lea(rscratch, src); 2657 movdqu(dst, Address(rscratch, 0)); 2658 } 2659 } 2660 2661 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) { 2662 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2663 Assembler::vmovdqu(dst, src); 2664 } 2665 2666 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) { 2667 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2668 Assembler::vmovdqu(dst, src); 2669 } 2670 2671 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2672 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2673 Assembler::vmovdqu(dst, src); 2674 } 2675 2676 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2677 assert(rscratch != noreg || always_reachable(src), "missing"); 2678 2679 if (reachable(src)) { 2680 vmovdqu(dst, as_Address(src)); 2681 } 2682 else { 2683 lea(rscratch, src); 2684 vmovdqu(dst, Address(rscratch, 0)); 2685 } 2686 } 2687 2688 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2689 assert(rscratch != noreg || always_reachable(src), "missing"); 2690 2691 if (vector_len == AVX_512bit) { 2692 evmovdquq(dst, src, AVX_512bit, rscratch); 2693 } else if (vector_len == AVX_256bit) { 2694 vmovdqu(dst, src, rscratch); 2695 } else { 2696 movdqu(dst, src, rscratch); 2697 } 2698 } 2699 2700 void MacroAssembler::kmov(KRegister dst, Address src) { 2701 if (VM_Version::supports_avx512bw()) { 2702 kmovql(dst, src); 2703 } else { 2704 assert(VM_Version::supports_evex(), ""); 2705 kmovwl(dst, src); 2706 } 2707 } 2708 2709 void MacroAssembler::kmov(Address dst, KRegister src) { 2710 if (VM_Version::supports_avx512bw()) { 2711 kmovql(dst, src); 2712 } else { 2713 assert(VM_Version::supports_evex(), ""); 2714 kmovwl(dst, src); 2715 } 2716 } 2717 2718 void MacroAssembler::kmov(KRegister dst, 
KRegister src) { 2719 if (VM_Version::supports_avx512bw()) { 2720 kmovql(dst, src); 2721 } else { 2722 assert(VM_Version::supports_evex(), ""); 2723 kmovwl(dst, src); 2724 } 2725 } 2726 2727 void MacroAssembler::kmov(Register dst, KRegister src) { 2728 if (VM_Version::supports_avx512bw()) { 2729 kmovql(dst, src); 2730 } else { 2731 assert(VM_Version::supports_evex(), ""); 2732 kmovwl(dst, src); 2733 } 2734 } 2735 2736 void MacroAssembler::kmov(KRegister dst, Register src) { 2737 if (VM_Version::supports_avx512bw()) { 2738 kmovql(dst, src); 2739 } else { 2740 assert(VM_Version::supports_evex(), ""); 2741 kmovwl(dst, src); 2742 } 2743 } 2744 2745 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) { 2746 assert(rscratch != noreg || always_reachable(src), "missing"); 2747 2748 if (reachable(src)) { 2749 kmovql(dst, as_Address(src)); 2750 } else { 2751 lea(rscratch, src); 2752 kmovql(dst, Address(rscratch, 0)); 2753 } 2754 } 2755 2756 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) { 2757 assert(rscratch != noreg || always_reachable(src), "missing"); 2758 2759 if (reachable(src)) { 2760 kmovwl(dst, as_Address(src)); 2761 } else { 2762 lea(rscratch, src); 2763 kmovwl(dst, Address(rscratch, 0)); 2764 } 2765 } 2766 2767 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2768 int vector_len, Register rscratch) { 2769 assert(rscratch != noreg || always_reachable(src), "missing"); 2770 2771 if (reachable(src)) { 2772 Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len); 2773 } else { 2774 lea(rscratch, src); 2775 Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len); 2776 } 2777 } 2778 2779 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2780 int vector_len, Register rscratch) { 2781 assert(rscratch != noreg || always_reachable(src), "missing"); 2782 2783 if (reachable(src)) { 2784 Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len); 2785 } else { 2786 lea(rscratch, src); 2787 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len); 2788 } 2789 } 2790 2791 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2792 assert(rscratch != noreg || always_reachable(src), "missing"); 2793 2794 if (reachable(src)) { 2795 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len); 2796 } else { 2797 lea(rscratch, src); 2798 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len); 2799 } 2800 } 2801 2802 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2803 assert(rscratch != noreg || always_reachable(src), "missing"); 2804 2805 if (reachable(src)) { 2806 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len); 2807 } else { 2808 lea(rscratch, src); 2809 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len); 2810 } 2811 } 2812 2813 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2814 assert(rscratch != noreg || always_reachable(src), "missing"); 2815 2816 if (reachable(src)) { 2817 Assembler::evmovdquq(dst, as_Address(src), vector_len); 2818 } else { 2819 lea(rscratch, src); 2820 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len); 2821 } 2822 } 2823 2824 void MacroAssembler::movdqa(XMMRegister dst, 
AddressLiteral src, Register rscratch) { 2825 assert(rscratch != noreg || always_reachable(src), "missing"); 2826 2827 if (reachable(src)) { 2828 Assembler::movdqa(dst, as_Address(src)); 2829 } else { 2830 lea(rscratch, src); 2831 Assembler::movdqa(dst, Address(rscratch, 0)); 2832 } 2833 } 2834 2835 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2836 assert(rscratch != noreg || always_reachable(src), "missing"); 2837 2838 if (reachable(src)) { 2839 Assembler::movsd(dst, as_Address(src)); 2840 } else { 2841 lea(rscratch, src); 2842 Assembler::movsd(dst, Address(rscratch, 0)); 2843 } 2844 } 2845 2846 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2847 assert(rscratch != noreg || always_reachable(src), "missing"); 2848 2849 if (reachable(src)) { 2850 Assembler::movss(dst, as_Address(src)); 2851 } else { 2852 lea(rscratch, src); 2853 Assembler::movss(dst, Address(rscratch, 0)); 2854 } 2855 } 2856 2857 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) { 2858 assert(rscratch != noreg || always_reachable(src), "missing"); 2859 2860 if (reachable(src)) { 2861 Assembler::movddup(dst, as_Address(src)); 2862 } else { 2863 lea(rscratch, src); 2864 Assembler::movddup(dst, Address(rscratch, 0)); 2865 } 2866 } 2867 2868 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2869 assert(rscratch != noreg || always_reachable(src), "missing"); 2870 2871 if (reachable(src)) { 2872 Assembler::vmovddup(dst, as_Address(src), vector_len); 2873 } else { 2874 lea(rscratch, src); 2875 Assembler::vmovddup(dst, Address(rscratch, 0), vector_len); 2876 } 2877 } 2878 2879 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2880 assert(rscratch != noreg || always_reachable(src), "missing"); 2881 2882 if (reachable(src)) { 2883 Assembler::mulsd(dst, as_Address(src)); 2884 } else { 2885 lea(rscratch, src); 2886 Assembler::mulsd(dst, Address(rscratch, 0)); 2887 } 2888 } 2889 2890 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2891 assert(rscratch != noreg || always_reachable(src), "missing"); 2892 2893 if (reachable(src)) { 2894 Assembler::mulss(dst, as_Address(src)); 2895 } else { 2896 lea(rscratch, src); 2897 Assembler::mulss(dst, Address(rscratch, 0)); 2898 } 2899 } 2900 2901 void MacroAssembler::null_check(Register reg, int offset) { 2902 if (needs_explicit_null_check(offset)) { 2903 // provoke OS null exception if reg is null by 2904 // accessing M[reg] w/o changing any (non-CC) registers 2905 // NOTE: cmpl is plenty here to provoke a segv 2906 cmpptr(rax, Address(reg, 0)); 2907 // Note: should probably use testl(rax, Address(reg, 0)); 2908 // may be shorter code (however, this version of 2909 // testl needs to be implemented first) 2910 } else { 2911 // nothing to do, (later) access of M[reg + offset] 2912 // will provoke OS null exception if reg is null 2913 } 2914 } 2915 2916 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) { 2917 andptr(markword, markWord::inline_type_mask_in_place); 2918 cmpptr(markword, markWord::inline_type_pattern); 2919 jcc(Assembler::equal, is_inline_type); 2920 } 2921 2922 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) { 2923 movl(temp_reg, Address(klass, Klass::access_flags_offset())); 2924 testl(temp_reg, JVM_ACC_IDENTITY); 2925 jcc(Assembler::zero, 
is_inline_type); 2926 } 2927 2928 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) { 2929 testptr(object, object); 2930 jcc(Assembler::zero, not_inline_type); 2931 const int is_inline_type_mask = markWord::inline_type_pattern; 2932 movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes())); 2933 andptr(tmp, is_inline_type_mask); 2934 cmpptr(tmp, is_inline_type_mask); 2935 jcc(Assembler::notEqual, not_inline_type); 2936 } 2937 2938 void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) { 2939 #ifdef ASSERT 2940 { 2941 Label done_check; 2942 test_klass_is_inline_type(klass, temp_reg, done_check); 2943 stop("test_klass_is_empty_inline_type with non inline type klass"); 2944 bind(done_check); 2945 } 2946 #endif 2947 movl(temp_reg, Address(klass, InstanceKlass::misc_flags_offset())); 2948 testl(temp_reg, InstanceKlassFlags::is_empty_inline_type_value()); 2949 jcc(Assembler::notZero, is_empty_inline_type); 2950 } 2951 2952 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) { 2953 movl(temp_reg, flags); 2954 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift); 2955 jcc(Assembler::notEqual, is_null_free_inline_type); 2956 } 2957 2958 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) { 2959 movl(temp_reg, flags); 2960 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift); 2961 jcc(Assembler::equal, not_null_free_inline_type); 2962 } 2963 2964 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) { 2965 movl(temp_reg, flags); 2966 testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift); 2967 jcc(Assembler::notEqual, is_flat); 2968 } 2969 2970 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) { 2971 movl(temp_reg, flags); 2972 testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift); 2973 jcc(Assembler::notEqual, has_null_marker); 2974 } 2975 2976 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) { 2977 Label test_mark_word; 2978 // load mark word 2979 movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes())); 2980 // check displaced 2981 testl(temp_reg, markWord::unlocked_value); 2982 jccb(Assembler::notZero, test_mark_word); 2983 // slow path use klass prototype 2984 push(rscratch1); 2985 load_prototype_header(temp_reg, oop, rscratch1); 2986 pop(rscratch1); 2987 2988 bind(test_mark_word); 2989 testl(temp_reg, test_bit); 2990 jcc((jmp_set) ? 
Assembler::notZero : Assembler::zero, jmp_label); 2991 } 2992 2993 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg, 2994 Label& is_flat_array) { 2995 #ifdef _LP64 2996 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array); 2997 #else 2998 load_klass(temp_reg, oop, noreg); 2999 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset())); 3000 test_flat_array_layout(temp_reg, is_flat_array); 3001 #endif 3002 } 3003 3004 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg, 3005 Label& is_non_flat_array) { 3006 #ifdef _LP64 3007 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array); 3008 #else 3009 load_klass(temp_reg, oop, noreg); 3010 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset())); 3011 test_non_flat_array_layout(temp_reg, is_non_flat_array); 3012 #endif 3013 } 3014 3015 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array) { 3016 #ifdef _LP64 3017 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array); 3018 #else 3019 Unimplemented(); 3020 #endif 3021 } 3022 3023 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) { 3024 #ifdef _LP64 3025 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array); 3026 #else 3027 Unimplemented(); 3028 #endif 3029 } 3030 3031 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) { 3032 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace); 3033 jcc(Assembler::notZero, is_flat_array); 3034 } 3035 3036 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) { 3037 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace); 3038 jcc(Assembler::zero, is_non_flat_array); 3039 } 3040 3041 void MacroAssembler::os_breakpoint() { 3042 // instead of directly emitting a breakpoint, call os:breakpoint for better debugability 3043 // (e.g., MSVC can't call ps() otherwise) 3044 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); 3045 } 3046 3047 void MacroAssembler::unimplemented(const char* what) { 3048 const char* buf = nullptr; 3049 { 3050 ResourceMark rm; 3051 stringStream ss; 3052 ss.print("unimplemented: %s", what); 3053 buf = code_string(ss.as_string()); 3054 } 3055 stop(buf); 3056 } 3057 3058 #ifdef _LP64 3059 #define XSTATE_BV 0x200 3060 #endif 3061 3062 void MacroAssembler::pop_CPU_state() { 3063 pop_FPU_state(); 3064 pop_IU_state(); 3065 } 3066 3067 void MacroAssembler::pop_FPU_state() { 3068 #ifndef _LP64 3069 frstor(Address(rsp, 0)); 3070 #else 3071 fxrstor(Address(rsp, 0)); 3072 #endif 3073 addptr(rsp, FPUStateSizeInWords * wordSize); 3074 } 3075 3076 void MacroAssembler::pop_IU_state() { 3077 popa(); 3078 LP64_ONLY(addq(rsp, 8)); 3079 popf(); 3080 } 3081 3082 // Save Integer and Float state 3083 // Warning: Stack must be 16 byte aligned (64bit) 3084 void MacroAssembler::push_CPU_state() { 3085 push_IU_state(); 3086 push_FPU_state(); 3087 } 3088 3089 void MacroAssembler::push_FPU_state() { 3090 subptr(rsp, FPUStateSizeInWords * wordSize); 3091 #ifndef _LP64 3092 fnsave(Address(rsp, 0)); 3093 fwait(); 3094 #else 3095 fxsave(Address(rsp, 0)); 3096 #endif // LP64 3097 } 3098 3099 void MacroAssembler::push_IU_state() { 3100 // Push flags first because pusha kills them 3101 pushf(); 3102 // Make sure rsp stays 16-byte aligned 3103 
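  // (On 64-bit, pushf pushed one 8-byte word; the 8-byte adjustment below
  // restores 16-byte alignment, and the even number of 8-byte register pushes
  // done by pusha preserves it.)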
LP64_ONLY(subq(rsp, 8)); 3104 pusha(); 3105 } 3106 3107 void MacroAssembler::push_cont_fastpath() { 3108 if (!Continuations::enabled()) return; 3109 3110 #ifndef _LP64 3111 Register rthread = rax; 3112 Register rrealsp = rbx; 3113 push(rthread); 3114 push(rrealsp); 3115 3116 get_thread(rthread); 3117 3118 // The code below wants the original RSP. 3119 // Move it back after the pushes above. 3120 movptr(rrealsp, rsp); 3121 addptr(rrealsp, 2*wordSize); 3122 #else 3123 Register rthread = r15_thread; 3124 Register rrealsp = rsp; 3125 #endif 3126 3127 Label done; 3128 cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset())); 3129 jccb(Assembler::belowEqual, done); 3130 movptr(Address(rthread, JavaThread::cont_fastpath_offset()), rrealsp); 3131 bind(done); 3132 3133 #ifndef _LP64 3134 pop(rrealsp); 3135 pop(rthread); 3136 #endif 3137 } 3138 3139 void MacroAssembler::pop_cont_fastpath() { 3140 if (!Continuations::enabled()) return; 3141 3142 #ifndef _LP64 3143 Register rthread = rax; 3144 Register rrealsp = rbx; 3145 push(rthread); 3146 push(rrealsp); 3147 3148 get_thread(rthread); 3149 3150 // The code below wants the original RSP. 3151 // Move it back after the pushes above. 3152 movptr(rrealsp, rsp); 3153 addptr(rrealsp, 2*wordSize); 3154 #else 3155 Register rthread = r15_thread; 3156 Register rrealsp = rsp; 3157 #endif 3158 3159 Label done; 3160 cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset())); 3161 jccb(Assembler::below, done); 3162 movptr(Address(rthread, JavaThread::cont_fastpath_offset()), 0); 3163 bind(done); 3164 3165 #ifndef _LP64 3166 pop(rrealsp); 3167 pop(rthread); 3168 #endif 3169 } 3170 3171 void MacroAssembler::inc_held_monitor_count() { 3172 #ifndef _LP64 3173 Register thread = rax; 3174 push(thread); 3175 get_thread(thread); 3176 incrementl(Address(thread, JavaThread::held_monitor_count_offset())); 3177 pop(thread); 3178 #else // LP64 3179 incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 3180 #endif 3181 } 3182 3183 void MacroAssembler::dec_held_monitor_count() { 3184 #ifndef _LP64 3185 Register thread = rax; 3186 push(thread); 3187 get_thread(thread); 3188 decrementl(Address(thread, JavaThread::held_monitor_count_offset())); 3189 pop(thread); 3190 #else // LP64 3191 decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 3192 #endif 3193 } 3194 3195 #ifdef ASSERT 3196 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) { 3197 #ifdef _LP64 3198 Label no_cont; 3199 movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset())); 3200 testl(cont, cont); 3201 jcc(Assembler::zero, no_cont); 3202 stop(name); 3203 bind(no_cont); 3204 #else 3205 Unimplemented(); 3206 #endif 3207 } 3208 #endif 3209 3210 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) { // determine java_thread register 3211 if (!java_thread->is_valid()) { 3212 java_thread = rdi; 3213 get_thread(java_thread); 3214 } 3215 // we must set sp to zero to clear frame 3216 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD); 3217 // must clear fp, so that compiled frames are not confused; it is 3218 // possible that we need it only for debugging 3219 if (clear_fp) { 3220 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); 3221 } 3222 // Always clear the pc because it could have been set by make_walkable() 3223 movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); 3224 vzeroupper(); 3225 } 3226 3227 void 
MacroAssembler::restore_rax(Register tmp) { 3228 if (tmp == noreg) pop(rax); 3229 else if (tmp != rax) mov(rax, tmp); 3230 } 3231 3232 void MacroAssembler::round_to(Register reg, int modulus) { 3233 addptr(reg, modulus - 1); 3234 andptr(reg, -modulus); 3235 } 3236 3237 void MacroAssembler::save_rax(Register tmp) { 3238 if (tmp == noreg) push(rax); 3239 else if (tmp != rax) mov(tmp, rax); 3240 } 3241 3242 void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod) { 3243 if (at_return) { 3244 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore, 3245 // we may safely use rsp instead to perform the stack watermark check. 3246 cmpptr(in_nmethod ? rsp : rbp, Address(thread_reg, JavaThread::polling_word_offset())); 3247 jcc(Assembler::above, slow_path); 3248 return; 3249 } 3250 testb(Address(thread_reg, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit()); 3251 jcc(Assembler::notZero, slow_path); // handshake bit set implies poll 3252 } 3253 3254 // Calls to C land 3255 // 3256 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded 3257 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp 3258 // has to be reset to 0. This is required to allow proper stack traversal. 3259 void MacroAssembler::set_last_Java_frame(Register java_thread, 3260 Register last_java_sp, 3261 Register last_java_fp, 3262 address last_java_pc, 3263 Register rscratch) { 3264 vzeroupper(); 3265 // determine java_thread register 3266 if (!java_thread->is_valid()) { 3267 java_thread = rdi; 3268 get_thread(java_thread); 3269 } 3270 // determine last_java_sp register 3271 if (!last_java_sp->is_valid()) { 3272 last_java_sp = rsp; 3273 } 3274 // last_java_fp is optional 3275 if (last_java_fp->is_valid()) { 3276 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp); 3277 } 3278 // last_java_pc is optional 3279 if (last_java_pc != nullptr) { 3280 Address java_pc(java_thread, 3281 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); 3282 lea(java_pc, InternalAddress(last_java_pc), rscratch); 3283 } 3284 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp); 3285 } 3286 3287 void MacroAssembler::shlptr(Register dst, int imm8) { 3288 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8)); 3289 } 3290 3291 void MacroAssembler::shrptr(Register dst, int imm8) { 3292 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8)); 3293 } 3294 3295 void MacroAssembler::sign_extend_byte(Register reg) { 3296 if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) { 3297 movsbl(reg, reg); // movsxb 3298 } else { 3299 shll(reg, 24); 3300 sarl(reg, 24); 3301 } 3302 } 3303 3304 void MacroAssembler::sign_extend_short(Register reg) { 3305 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 3306 movswl(reg, reg); // movsxw 3307 } else { 3308 shll(reg, 16); 3309 sarl(reg, 16); 3310 } 3311 } 3312 3313 void MacroAssembler::testl(Address dst, int32_t imm32) { 3314 if (imm32 >= 0 && is8bit(imm32)) { 3315 testb(dst, imm32); 3316 } else { 3317 Assembler::testl(dst, imm32); 3318 } 3319 } 3320 3321 void MacroAssembler::testl(Register dst, int32_t imm32) { 3322 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) { 3323 testb(dst, imm32); 3324 } else { 3325 Assembler::testl(dst, imm32); 3326 } 3327 } 3328 3329 void MacroAssembler::testl(Register dst, AddressLiteral src) { 3330 assert(always_reachable(src), "Address 
should be reachable"); 3331 testl(dst, as_Address(src)); 3332 } 3333 3334 #ifdef _LP64 3335 3336 void MacroAssembler::testq(Address dst, int32_t imm32) { 3337 if (imm32 >= 0) { 3338 testl(dst, imm32); 3339 } else { 3340 Assembler::testq(dst, imm32); 3341 } 3342 } 3343 3344 void MacroAssembler::testq(Register dst, int32_t imm32) { 3345 if (imm32 >= 0) { 3346 testl(dst, imm32); 3347 } else { 3348 Assembler::testq(dst, imm32); 3349 } 3350 } 3351 3352 #endif 3353 3354 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) { 3355 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3356 Assembler::pcmpeqb(dst, src); 3357 } 3358 3359 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) { 3360 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3361 Assembler::pcmpeqw(dst, src); 3362 } 3363 3364 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) { 3365 assert((dst->encoding() < 16),"XMM register should be 0-15"); 3366 Assembler::pcmpestri(dst, src, imm8); 3367 } 3368 3369 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { 3370 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3371 Assembler::pcmpestri(dst, src, imm8); 3372 } 3373 3374 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) { 3375 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3376 Assembler::pmovzxbw(dst, src); 3377 } 3378 3379 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) { 3380 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3381 Assembler::pmovzxbw(dst, src); 3382 } 3383 3384 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) { 3385 assert((src->encoding() < 16),"XMM register should be 0-15"); 3386 Assembler::pmovmskb(dst, src); 3387 } 3388 3389 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) { 3390 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3391 Assembler::ptest(dst, src); 3392 } 3393 3394 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3395 assert(rscratch != noreg || always_reachable(src), "missing"); 3396 3397 if (reachable(src)) { 3398 Assembler::sqrtss(dst, as_Address(src)); 3399 } else { 3400 lea(rscratch, src); 3401 Assembler::sqrtss(dst, Address(rscratch, 0)); 3402 } 3403 } 3404 3405 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3406 assert(rscratch != noreg || always_reachable(src), "missing"); 3407 3408 if (reachable(src)) { 3409 Assembler::subsd(dst, as_Address(src)); 3410 } else { 3411 lea(rscratch, src); 3412 Assembler::subsd(dst, Address(rscratch, 0)); 3413 } 3414 } 3415 3416 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) { 3417 assert(rscratch != noreg || always_reachable(src), "missing"); 3418 3419 if (reachable(src)) { 3420 Assembler::roundsd(dst, as_Address(src), rmode); 3421 } else { 3422 lea(rscratch, src); 3423 Assembler::roundsd(dst, Address(rscratch, 0), rmode); 3424 } 3425 } 3426 3427 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3428 assert(rscratch != noreg || always_reachable(src), "missing"); 3429 3430 if (reachable(src)) { 3431 Assembler::subss(dst, as_Address(src)); 3432 } 
else { 3433 lea(rscratch, src); 3434 Assembler::subss(dst, Address(rscratch, 0)); 3435 } 3436 } 3437 3438 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3439 assert(rscratch != noreg || always_reachable(src), "missing"); 3440 3441 if (reachable(src)) { 3442 Assembler::ucomisd(dst, as_Address(src)); 3443 } else { 3444 lea(rscratch, src); 3445 Assembler::ucomisd(dst, Address(rscratch, 0)); 3446 } 3447 } 3448 3449 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3450 assert(rscratch != noreg || always_reachable(src), "missing"); 3451 3452 if (reachable(src)) { 3453 Assembler::ucomiss(dst, as_Address(src)); 3454 } else { 3455 lea(rscratch, src); 3456 Assembler::ucomiss(dst, Address(rscratch, 0)); 3457 } 3458 } 3459 3460 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3461 assert(rscratch != noreg || always_reachable(src), "missing"); 3462 3463 // Used in sign-bit flipping with aligned address. 3464 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 3465 if (reachable(src)) { 3466 Assembler::xorpd(dst, as_Address(src)); 3467 } else { 3468 lea(rscratch, src); 3469 Assembler::xorpd(dst, Address(rscratch, 0)); 3470 } 3471 } 3472 3473 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) { 3474 if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) { 3475 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 3476 } 3477 else { 3478 Assembler::xorpd(dst, src); 3479 } 3480 } 3481 3482 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) { 3483 if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) { 3484 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 3485 } else { 3486 Assembler::xorps(dst, src); 3487 } 3488 } 3489 3490 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) { 3491 assert(rscratch != noreg || always_reachable(src), "missing"); 3492 3493 // Used in sign-bit flipping with aligned address. 3494 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 3495 if (reachable(src)) { 3496 Assembler::xorps(dst, as_Address(src)); 3497 } else { 3498 lea(rscratch, src); 3499 Assembler::xorps(dst, Address(rscratch, 0)); 3500 } 3501 } 3502 3503 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) { 3504 assert(rscratch != noreg || always_reachable(src), "missing"); 3505 3506 // Used in sign-bit flipping with aligned address. 
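  // (The legacy SSE encoding of pshufb faults on a misaligned 16-byte memory
  // operand, while the VEX encoding used when UseAVX > 0 tolerates unaligned
  // operands, so only the SSE path needs the alignment guarantee.)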
3507 bool aligned_adr = (((intptr_t)src.target() & 15) == 0); 3508 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes"); 3509 if (reachable(src)) { 3510 Assembler::pshufb(dst, as_Address(src)); 3511 } else { 3512 lea(rscratch, src); 3513 Assembler::pshufb(dst, Address(rscratch, 0)); 3514 } 3515 } 3516 3517 // AVX 3-operands instructions 3518 3519 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3520 assert(rscratch != noreg || always_reachable(src), "missing"); 3521 3522 if (reachable(src)) { 3523 vaddsd(dst, nds, as_Address(src)); 3524 } else { 3525 lea(rscratch, src); 3526 vaddsd(dst, nds, Address(rscratch, 0)); 3527 } 3528 } 3529 3530 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3531 assert(rscratch != noreg || always_reachable(src), "missing"); 3532 3533 if (reachable(src)) { 3534 vaddss(dst, nds, as_Address(src)); 3535 } else { 3536 lea(rscratch, src); 3537 vaddss(dst, nds, Address(rscratch, 0)); 3538 } 3539 } 3540 3541 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3542 assert(UseAVX > 0, "requires some form of AVX"); 3543 assert(rscratch != noreg || always_reachable(src), "missing"); 3544 3545 if (reachable(src)) { 3546 Assembler::vpaddb(dst, nds, as_Address(src), vector_len); 3547 } else { 3548 lea(rscratch, src); 3549 Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len); 3550 } 3551 } 3552 3553 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3554 assert(UseAVX > 0, "requires some form of AVX"); 3555 assert(rscratch != noreg || always_reachable(src), "missing"); 3556 3557 if (reachable(src)) { 3558 Assembler::vpaddd(dst, nds, as_Address(src), vector_len); 3559 } else { 3560 lea(rscratch, src); 3561 Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len); 3562 } 3563 } 3564 3565 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 3566 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3567 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 3568 3569 vandps(dst, nds, negate_field, vector_len, rscratch); 3570 } 3571 3572 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 3573 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3574 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 3575 3576 vandpd(dst, nds, negate_field, vector_len, rscratch); 3577 } 3578 3579 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3580 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3581 Assembler::vpaddb(dst, nds, src, vector_len); 3582 } 3583 3584 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3585 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3586 Assembler::vpaddb(dst, nds, src, vector_len); 3587 } 3588 3589 void 
MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3590 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3591 Assembler::vpaddw(dst, nds, src, vector_len); 3592 } 3593 3594 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3595 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3596 Assembler::vpaddw(dst, nds, src, vector_len); 3597 } 3598 3599 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3600 assert(rscratch != noreg || always_reachable(src), "missing"); 3601 3602 if (reachable(src)) { 3603 Assembler::vpand(dst, nds, as_Address(src), vector_len); 3604 } else { 3605 lea(rscratch, src); 3606 Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len); 3607 } 3608 } 3609 3610 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3611 assert(rscratch != noreg || always_reachable(src), "missing"); 3612 3613 if (reachable(src)) { 3614 Assembler::vpbroadcastd(dst, as_Address(src), vector_len); 3615 } else { 3616 lea(rscratch, src); 3617 Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len); 3618 } 3619 } 3620 3621 void MacroAssembler::vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3622 assert(rscratch != noreg || always_reachable(src), "missing"); 3623 3624 if (reachable(src)) { 3625 Assembler::vbroadcasti128(dst, as_Address(src), vector_len); 3626 } else { 3627 lea(rscratch, src); 3628 Assembler::vbroadcasti128(dst, Address(rscratch, 0), vector_len); 3629 } 3630 } 3631 3632 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3633 assert(rscratch != noreg || always_reachable(src), "missing"); 3634 3635 if (reachable(src)) { 3636 Assembler::vpbroadcastq(dst, as_Address(src), vector_len); 3637 } else { 3638 lea(rscratch, src); 3639 Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len); 3640 } 3641 } 3642 3643 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3644 assert(rscratch != noreg || always_reachable(src), "missing"); 3645 3646 if (reachable(src)) { 3647 Assembler::vbroadcastsd(dst, as_Address(src), vector_len); 3648 } else { 3649 lea(rscratch, src); 3650 Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len); 3651 } 3652 } 3653 3654 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3655 assert(rscratch != noreg || always_reachable(src), "missing"); 3656 3657 if (reachable(src)) { 3658 Assembler::vbroadcastss(dst, as_Address(src), vector_len); 3659 } else { 3660 lea(rscratch, src); 3661 Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len); 3662 } 3663 } 3664 3665 // Vector float blend 3666 // vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg) 3667 void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) { 3668 // WARN: Allow dst == (src1|src2), mask == scratch 3669 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1; 3670 bool scratch_available = scratch != xnoreg 
&& scratch != src1 && scratch != src2 && scratch != dst;
3671 bool dst_available = dst != mask && (dst != src1 || dst != src2);
3672 if (blend_emulation && scratch_available && dst_available) {
3673 if (compute_mask) {
3674 vpsrad(scratch, mask, 32, vector_len);
3675 mask = scratch;
3676 }
3677 if (dst == src1) {
3678 vpandn(dst, mask, src1, vector_len); // if mask == 0, src1
3679 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
3680 } else {
3681 vpand (dst, mask, src2, vector_len); // if mask == 1, src2
3682 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
3683 }
3684 vpor(dst, dst, scratch, vector_len);
3685 } else {
3686 Assembler::vblendvps(dst, src1, src2, mask, vector_len);
3687 }
3688 }
3689
3690 // vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
3691 void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
3692 // WARN: Allow dst == (src1|src2), mask == scratch
3693 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1;
3694 bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask);
3695 bool dst_available = dst != mask && (dst != src1 || dst != src2);
3696 if (blend_emulation && scratch_available && dst_available) {
3697 if (compute_mask) {
3698 vpxor(scratch, scratch, scratch, vector_len);
3699 vpcmpgtq(scratch, scratch, mask, vector_len);
3700 mask = scratch;
3701 }
3702 if (dst == src1) {
3703 vpandn(dst, mask, src1, vector_len); // if mask == 0, src1
3704 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
3705 } else {
3706 vpand (dst, mask, src2, vector_len); // if mask == 1, src2
3707 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
3708 }
3709 vpor(dst, dst, scratch, vector_len);
3710 } else {
3711 Assembler::vblendvpd(dst, src1, src2, mask, vector_len);
3712 }
3713 }
3714
3715 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3716 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3717 Assembler::vpcmpeqb(dst, nds, src, vector_len);
3718 }
3719
3720 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
3721 assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3722 Assembler::vpcmpeqb(dst, src1, src2, vector_len);
3723 }
3724
3725 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
3726 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3727 Assembler::vpcmpeqw(dst, nds, src, vector_len);
3728 }
3729
3730 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
3731 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
3732 Assembler::vpcmpeqw(dst, nds, src, vector_len);
3733 }
3734
3735 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
3736 assert(rscratch != noreg || always_reachable(src), "missing");
3737
3738 if (reachable(src)) {
3739
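// The literal is within disp32 range of the generated code, so it can be
// addressed directly (RIP-relative on 64-bit) without a scratch register.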
Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len); 3740 } else { 3741 lea(rscratch, src); 3742 Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len); 3743 } 3744 } 3745 3746 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3747 int comparison, bool is_signed, int vector_len, Register rscratch) { 3748 assert(rscratch != noreg || always_reachable(src), "missing"); 3749 3750 if (reachable(src)) { 3751 Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3752 } else { 3753 lea(rscratch, src); 3754 Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3755 } 3756 } 3757 3758 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3759 int comparison, bool is_signed, int vector_len, Register rscratch) { 3760 assert(rscratch != noreg || always_reachable(src), "missing"); 3761 3762 if (reachable(src)) { 3763 Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3764 } else { 3765 lea(rscratch, src); 3766 Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3767 } 3768 } 3769 3770 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3771 int comparison, bool is_signed, int vector_len, Register rscratch) { 3772 assert(rscratch != noreg || always_reachable(src), "missing"); 3773 3774 if (reachable(src)) { 3775 Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3776 } else { 3777 lea(rscratch, src); 3778 Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3779 } 3780 } 3781 3782 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3783 int comparison, bool is_signed, int vector_len, Register rscratch) { 3784 assert(rscratch != noreg || always_reachable(src), "missing"); 3785 3786 if (reachable(src)) { 3787 Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3788 } else { 3789 lea(rscratch, src); 3790 Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3791 } 3792 } 3793 3794 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) { 3795 if (width == Assembler::Q) { 3796 Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len); 3797 } else { 3798 Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len); 3799 } 3800 } 3801 3802 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) { 3803 int eq_cond_enc = 0x29; 3804 int gt_cond_enc = 0x37; 3805 if (width != Assembler::Q) { 3806 eq_cond_enc = 0x74 + width; 3807 gt_cond_enc = 0x64 + width; 3808 } 3809 switch (cond) { 3810 case eq: 3811 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3812 break; 3813 case neq: 3814 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3815 vallones(xtmp, vector_len); 3816 vpxor(dst, xtmp, dst, vector_len); 3817 break; 3818 case le: 3819 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3820 vallones(xtmp, vector_len); 3821 vpxor(dst, xtmp, dst, vector_len); 3822 break; 3823 case nlt: 3824 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3825 vallones(xtmp, vector_len); 3826 vpxor(dst, xtmp, dst, 
vector_len); 3827 break; 3828 case lt: 3829 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3830 break; 3831 case nle: 3832 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3833 break; 3834 default: 3835 assert(false, "Should not reach here"); 3836 } 3837 } 3838 3839 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) { 3840 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3841 Assembler::vpmovzxbw(dst, src, vector_len); 3842 } 3843 3844 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) { 3845 assert((src->encoding() < 16),"XMM register should be 0-15"); 3846 Assembler::vpmovmskb(dst, src, vector_len); 3847 } 3848 3849 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3850 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3851 Assembler::vpmullw(dst, nds, src, vector_len); 3852 } 3853 3854 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3855 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3856 Assembler::vpmullw(dst, nds, src, vector_len); 3857 } 3858 3859 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3860 assert((UseAVX > 0), "AVX support is needed"); 3861 assert(rscratch != noreg || always_reachable(src), "missing"); 3862 3863 if (reachable(src)) { 3864 Assembler::vpmulld(dst, nds, as_Address(src), vector_len); 3865 } else { 3866 lea(rscratch, src); 3867 Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len); 3868 } 3869 } 3870 3871 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3872 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3873 Assembler::vpsubb(dst, nds, src, vector_len); 3874 } 3875 3876 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3877 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3878 Assembler::vpsubb(dst, nds, src, vector_len); 3879 } 3880 3881 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3882 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3883 Assembler::vpsubw(dst, nds, src, vector_len); 3884 } 3885 3886 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3887 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3888 Assembler::vpsubw(dst, nds, src, vector_len); 3889 } 3890 3891 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3892 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3893 Assembler::vpsraw(dst, nds, shift, vector_len); 3894 } 3895 3896 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3897 assert(((dst->encoding() < 16 && nds->encoding() < 16) || 
VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3898 Assembler::vpsraw(dst, nds, shift, vector_len); 3899 } 3900 3901 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3902 assert(UseAVX > 2,""); 3903 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3904 vector_len = 2; 3905 } 3906 Assembler::evpsraq(dst, nds, shift, vector_len); 3907 } 3908 3909 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3910 assert(UseAVX > 2,""); 3911 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3912 vector_len = 2; 3913 } 3914 Assembler::evpsraq(dst, nds, shift, vector_len); 3915 } 3916 3917 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3918 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3919 Assembler::vpsrlw(dst, nds, shift, vector_len); 3920 } 3921 3922 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3923 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3924 Assembler::vpsrlw(dst, nds, shift, vector_len); 3925 } 3926 3927 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3928 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3929 Assembler::vpsllw(dst, nds, shift, vector_len); 3930 } 3931 3932 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3933 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3934 Assembler::vpsllw(dst, nds, shift, vector_len); 3935 } 3936 3937 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) { 3938 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3939 Assembler::vptest(dst, src); 3940 } 3941 3942 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) { 3943 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3944 Assembler::punpcklbw(dst, src); 3945 } 3946 3947 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) { 3948 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 3949 Assembler::pshufd(dst, src, mode); 3950 } 3951 3952 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { 3953 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3954 Assembler::pshuflw(dst, src, mode); 3955 } 3956 3957 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3958 assert(rscratch != noreg || always_reachable(src), "missing"); 3959 3960 if (reachable(src)) { 3961 vandpd(dst, nds, as_Address(src), vector_len); 3962 } else { 3963 lea(rscratch, src); 3964 vandpd(dst, nds, Address(rscratch, 0), vector_len); 3965 } 3966 } 3967 3968 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3969 assert(rscratch != noreg || always_reachable(src), "missing"); 3970 3971 if (reachable(src)) { 3972 vandps(dst, nds, as_Address(src), vector_len); 
3973 } else { 3974 lea(rscratch, src); 3975 vandps(dst, nds, Address(rscratch, 0), vector_len); 3976 } 3977 } 3978 3979 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, 3980 bool merge, int vector_len, Register rscratch) { 3981 assert(rscratch != noreg || always_reachable(src), "missing"); 3982 3983 if (reachable(src)) { 3984 Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len); 3985 } else { 3986 lea(rscratch, src); 3987 Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 3988 } 3989 } 3990 3991 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3992 assert(rscratch != noreg || always_reachable(src), "missing"); 3993 3994 if (reachable(src)) { 3995 vdivsd(dst, nds, as_Address(src)); 3996 } else { 3997 lea(rscratch, src); 3998 vdivsd(dst, nds, Address(rscratch, 0)); 3999 } 4000 } 4001 4002 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4003 assert(rscratch != noreg || always_reachable(src), "missing"); 4004 4005 if (reachable(src)) { 4006 vdivss(dst, nds, as_Address(src)); 4007 } else { 4008 lea(rscratch, src); 4009 vdivss(dst, nds, Address(rscratch, 0)); 4010 } 4011 } 4012 4013 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4014 assert(rscratch != noreg || always_reachable(src), "missing"); 4015 4016 if (reachable(src)) { 4017 vmulsd(dst, nds, as_Address(src)); 4018 } else { 4019 lea(rscratch, src); 4020 vmulsd(dst, nds, Address(rscratch, 0)); 4021 } 4022 } 4023 4024 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4025 assert(rscratch != noreg || always_reachable(src), "missing"); 4026 4027 if (reachable(src)) { 4028 vmulss(dst, nds, as_Address(src)); 4029 } else { 4030 lea(rscratch, src); 4031 vmulss(dst, nds, Address(rscratch, 0)); 4032 } 4033 } 4034 4035 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4036 assert(rscratch != noreg || always_reachable(src), "missing"); 4037 4038 if (reachable(src)) { 4039 vsubsd(dst, nds, as_Address(src)); 4040 } else { 4041 lea(rscratch, src); 4042 vsubsd(dst, nds, Address(rscratch, 0)); 4043 } 4044 } 4045 4046 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4047 assert(rscratch != noreg || always_reachable(src), "missing"); 4048 4049 if (reachable(src)) { 4050 vsubss(dst, nds, as_Address(src)); 4051 } else { 4052 lea(rscratch, src); 4053 vsubss(dst, nds, Address(rscratch, 0)); 4054 } 4055 } 4056 4057 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4058 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 4059 assert(rscratch != noreg || always_reachable(src), "missing"); 4060 4061 vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch); 4062 } 4063 4064 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4065 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 4066 assert(rscratch != noreg || always_reachable(src), "missing"); 4067 4068 vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch); 4069 } 4070 4071 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, 
int vector_len, Register rscratch) { 4072 assert(rscratch != noreg || always_reachable(src), "missing"); 4073 4074 if (reachable(src)) { 4075 vxorpd(dst, nds, as_Address(src), vector_len); 4076 } else { 4077 lea(rscratch, src); 4078 vxorpd(dst, nds, Address(rscratch, 0), vector_len); 4079 } 4080 } 4081 4082 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 4083 assert(rscratch != noreg || always_reachable(src), "missing"); 4084 4085 if (reachable(src)) { 4086 vxorps(dst, nds, as_Address(src), vector_len); 4087 } else { 4088 lea(rscratch, src); 4089 vxorps(dst, nds, Address(rscratch, 0), vector_len); 4090 } 4091 } 4092 4093 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 4094 assert(rscratch != noreg || always_reachable(src), "missing"); 4095 4096 if (UseAVX > 1 || (vector_len < 1)) { 4097 if (reachable(src)) { 4098 Assembler::vpxor(dst, nds, as_Address(src), vector_len); 4099 } else { 4100 lea(rscratch, src); 4101 Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len); 4102 } 4103 } else { 4104 MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch); 4105 } 4106 } 4107 4108 void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 4109 assert(rscratch != noreg || always_reachable(src), "missing"); 4110 4111 if (reachable(src)) { 4112 Assembler::vpermd(dst, nds, as_Address(src), vector_len); 4113 } else { 4114 lea(rscratch, src); 4115 Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len); 4116 } 4117 } 4118 4119 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) { 4120 const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask); 4121 STATIC_ASSERT(inverted_mask == -4); // otherwise check this code 4122 // The inverted mask is sign-extended 4123 andptr(possibly_non_local, inverted_mask); 4124 } 4125 4126 void MacroAssembler::resolve_jobject(Register value, 4127 Register thread, 4128 Register tmp) { 4129 assert_different_registers(value, thread, tmp); 4130 Label done, tagged, weak_tagged; 4131 testptr(value, value); 4132 jcc(Assembler::zero, done); // Use null as-is. 4133 testptr(value, JNIHandles::tag_mask); // Test for tag. 4134 jcc(Assembler::notZero, tagged); 4135 4136 // Resolve local handle 4137 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp, thread); 4138 verify_oop(value); 4139 jmp(done); 4140 4141 bind(tagged); 4142 testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag. 4143 jcc(Assembler::notZero, weak_tagged); 4144 4145 // Resolve global handle 4146 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread); 4147 verify_oop(value); 4148 jmp(done); 4149 4150 bind(weak_tagged); 4151 // Resolve jweak. 4152 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 4153 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp, thread); 4154 verify_oop(value); 4155 4156 bind(done); 4157 } 4158 4159 void MacroAssembler::resolve_global_jobject(Register value, 4160 Register thread, 4161 Register tmp) { 4162 assert_different_registers(value, thread, tmp); 4163 Label done; 4164 4165 testptr(value, value); 4166 jcc(Assembler::zero, done); // Use null as-is. 4167 4168 #ifdef ASSERT 4169 { 4170 Label valid_global_tag; 4171 testptr(value, JNIHandles::TypeTag::global); // Test for global tag. 
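// JNI handles encode their kind in the low-order tag bits of the pointer;
// anything passed to this routine is expected to carry the global tag.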
4172 jcc(Assembler::notZero, valid_global_tag);
4173 stop("non-global jobject passed to resolve_global_jobject");
4174 bind(valid_global_tag);
4175 }
4176 #endif
4177
4178 // Resolve global handle
4179 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread);
4180 verify_oop(value);
4181
4182 bind(done);
4183 }
4184
4185 void MacroAssembler::subptr(Register dst, int32_t imm32) {
4186 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
4187 }
4188
4189 // Force generation of a 4-byte immediate value even if it fits into 8 bits
4190 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
4191 LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
4192 }
4193
4194 void MacroAssembler::subptr(Register dst, Register src) {
4195 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
4196 }
4197
4198 // C++ bool manipulation
4199 void MacroAssembler::testbool(Register dst) {
4200 if (sizeof(bool) == 1)
4201 testb(dst, 0xff);
4202 else if (sizeof(bool) == 2) {
4203 // testw implementation needed for two-byte bools
4204 ShouldNotReachHere();
4205 } else if (sizeof(bool) == 4)
4206 testl(dst, dst);
4207 else
4208 // unsupported
4209 ShouldNotReachHere();
4210 }
4211
4212 void MacroAssembler::testptr(Register dst, Register src) {
4213 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
4214 }
4215
4216 // Object / value buffer allocation...
4217 //
4218 // Kills klass and rsi on LP64
4219 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
4220 Register t1, Register t2,
4221 bool clear_fields, Label& alloc_failed)
4222 {
4223 Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
4224 Register layout_size = t1;
4225 assert(new_obj == rax, "needs to be rax");
4226 assert_different_registers(klass, new_obj, t1, t2);
4227
4228 // get instance_size in InstanceKlass (scaled to a count of bytes)
4229 movl(layout_size, Address(klass, Klass::layout_helper_offset()));
4230 // test to see if it is malformed in some way
4231 testl(layout_size, Klass::_lh_instance_slow_path_bit);
4232 jcc(Assembler::notZero, slow_case_no_pop);
4233
4234 // Allocate the instance:
4235 // If TLAB is enabled:
4236 // Try to allocate in the TLAB.
4237 // If that fails, go to the slow path.
4238 // Else if inline contiguous allocations are enabled:
4239 // Try to allocate in eden.
4240 // If that fails due to the heap end, go to the slow path.
4241 //
4242 // If TLAB is enabled OR inline contiguous is enabled:
4243 // Initialize the allocation.
4244 // Exit.
4245 //
4246 // Go to the slow path.
4247
4248 push(klass);
4249 const Register thread = LP64_ONLY(r15_thread) NOT_LP64(klass);
4250 #ifndef _LP64
4251 if (UseTLAB) {
4252 get_thread(thread);
4253 }
4254 #endif // _LP64
4255
4256 if (UseTLAB) {
4257 tlab_allocate(thread, new_obj, layout_size, 0, klass, t2, slow_case);
4258 if (ZeroTLAB || (!clear_fields)) {
4259 // the fields have already been cleared
4260 jmp(initialize_header);
4261 } else {
4262 // initialize both the header and the fields
4263 jmp(initialize_object);
4264 }
4265 } else {
4266 jmp(slow_case);
4267 }
4268
4269 // If UseTLAB is true, the object was allocated above and still needs to be initialized.
4270 // Otherwise, this is skipped and control has gone to the slow path.
4271 if (UseTLAB) {
4272 if (clear_fields) {
4273 // Fields are cleared before the header is installed. If the instance size is
4274 // zero, go directly to the header initialization.
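// The clearing loop below walks the instance from the top down: layout_size
// is converted from a byte count into a count of 8-byte chunks and then used
// as a decrementing scaled index, so each iteration needs only one store
// (two on 32-bit) and a decrement.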
4275 bind(initialize_object); 4276 decrement(layout_size, sizeof(oopDesc)); 4277 jcc(Assembler::zero, initialize_header); 4278 4279 // Initialize topmost object field, divide size by 8, check if odd and 4280 // test if zero. 4281 Register zero = klass; 4282 xorl(zero, zero); // use zero reg to clear memory (shorter code) 4283 shrl(layout_size, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd 4284 4285 #ifdef ASSERT 4286 // make sure instance_size was multiple of 8 4287 Label L; 4288 // Ignore partial flag stall after shrl() since it is debug VM 4289 jcc(Assembler::carryClear, L); 4290 stop("object size is not multiple of 2 - adjust this code"); 4291 bind(L); 4292 // must be > 0, no extra check needed here 4293 #endif 4294 4295 // initialize remaining object fields: instance_size was a multiple of 8 4296 { 4297 Label loop; 4298 bind(loop); 4299 movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 1*oopSize), zero); 4300 NOT_LP64(movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 2*oopSize), zero)); 4301 decrement(layout_size); 4302 jcc(Assembler::notZero, loop); 4303 } 4304 } // clear_fields 4305 4306 // initialize object header only. 4307 bind(initialize_header); 4308 pop(klass); 4309 Register mark_word = t2; 4310 movptr(mark_word, Address(klass, Klass::prototype_header_offset())); 4311 movptr(Address(new_obj, oopDesc::mark_offset_in_bytes ()), mark_word); 4312 #ifdef _LP64 4313 xorl(rsi, rsi); // use zero reg to clear memory (shorter code) 4314 store_klass_gap(new_obj, rsi); // zero klass gap for compressed oops 4315 #endif 4316 movptr(t2, klass); // preserve klass 4317 store_klass(new_obj, t2, rscratch1); // src klass reg is potentially compressed 4318 4319 jmp(done); 4320 } 4321 4322 bind(slow_case); 4323 pop(klass); 4324 bind(slow_case_no_pop); 4325 jmp(alloc_failed); 4326 4327 bind(done); 4328 } 4329 4330 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 
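// The actual bump-pointer allocation is delegated to the current GC's
// BarrierSetAssembler, which branches to slow_case when the TLAB cannot
// satisfy the request.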
4331 void MacroAssembler::tlab_allocate(Register thread, Register obj,
4332 Register var_size_in_bytes,
4333 int con_size_in_bytes,
4334 Register t1,
4335 Register t2,
4336 Label& slow_case) {
4337 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4338 bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
4339 }
4340
4341 RegSet MacroAssembler::call_clobbered_gp_registers() {
4342 RegSet regs;
4343 #ifdef _LP64
4344 regs += RegSet::of(rax, rcx, rdx);
4345 #ifndef _WINDOWS
4346 regs += RegSet::of(rsi, rdi);
4347 #endif
4348 regs += RegSet::range(r8, r11);
4349 #else
4350 regs += RegSet::of(rax, rcx, rdx);
4351 #endif
4352 #ifdef _LP64
4353 if (UseAPX) {
4354 regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));
4355 }
4356 #endif
4357 return regs;
4358 }
4359
4360 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() {
4361 int num_xmm_registers = XMMRegister::available_xmm_registers();
4362 #if defined(_WINDOWS) && defined(_LP64)
4363 XMMRegSet result = XMMRegSet::range(xmm0, xmm5);
4364 if (num_xmm_registers > 16) {
4365 result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1));
4366 }
4367 return result;
4368 #else
4369 return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1));
4370 #endif
4371 }
4372
4373 static int FPUSaveAreaSize = align_up(108, StackAlignmentInBytes); // 108 bytes needed for FPU state by fsave/frstor
4374
4375 #ifndef _LP64
4376 static bool use_x87_registers() { return UseSSE < 2; }
4377 #endif
4378 static bool use_xmm_registers() { return UseSSE >= 1; }
4379
4380 // C1 only ever uses the first double/float of the XMM register.
4381 static int xmm_save_size() { return UseSSE >= 2 ? sizeof(double) : sizeof(float); }
4382
4383 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
4384 if (UseSSE == 1) {
4385 masm->movflt(Address(rsp, offset), reg);
4386 } else {
4387 masm->movdbl(Address(rsp, offset), reg);
4388 }
4389 }
4390
4391 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
4392 if (UseSSE == 1) {
4393 masm->movflt(reg, Address(rsp, offset));
4394 } else {
4395 masm->movdbl(reg, Address(rsp, offset));
4396 }
4397 }
4398
4399 static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers,
4400 bool save_fpu, int& gp_area_size,
4401 int& fp_area_size, int& xmm_area_size) {
4402
4403 gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
4404 StackAlignmentInBytes);
4405 #ifdef _LP64
4406 fp_area_size = 0;
4407 #else
4408 fp_area_size = (save_fpu && use_x87_registers()) ? FPUSaveAreaSize : 0;
4409 #endif
4410 xmm_area_size = (save_fpu && use_xmm_registers()) ?
xmm_registers.size() * xmm_save_size() : 0; 4411 4412 return gp_area_size + fp_area_size + xmm_area_size; 4413 } 4414 4415 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) { 4416 block_comment("push_call_clobbered_registers start"); 4417 // Regular registers 4418 RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude; 4419 4420 int gp_area_size; 4421 int fp_area_size; 4422 int xmm_area_size; 4423 int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu, 4424 gp_area_size, fp_area_size, xmm_area_size); 4425 subptr(rsp, total_save_size); 4426 4427 push_set(gp_registers_to_push, 0); 4428 4429 #ifndef _LP64 4430 if (save_fpu && use_x87_registers()) { 4431 fnsave(Address(rsp, gp_area_size)); 4432 fwait(); 4433 } 4434 #endif 4435 if (save_fpu && use_xmm_registers()) { 4436 push_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size); 4437 } 4438 4439 block_comment("push_call_clobbered_registers end"); 4440 } 4441 4442 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) { 4443 block_comment("pop_call_clobbered_registers start"); 4444 4445 RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude; 4446 4447 int gp_area_size; 4448 int fp_area_size; 4449 int xmm_area_size; 4450 int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu, 4451 gp_area_size, fp_area_size, xmm_area_size); 4452 4453 if (restore_fpu && use_xmm_registers()) { 4454 pop_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size); 4455 } 4456 #ifndef _LP64 4457 if (restore_fpu && use_x87_registers()) { 4458 frstor(Address(rsp, gp_area_size)); 4459 } 4460 #endif 4461 4462 pop_set(gp_registers_to_pop, 0); 4463 4464 addptr(rsp, total_save_size); 4465 4466 vzeroupper(); 4467 4468 block_comment("pop_call_clobbered_registers end"); 4469 } 4470 4471 void MacroAssembler::push_set(XMMRegSet set, int offset) { 4472 assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be"); 4473 int spill_offset = offset; 4474 4475 for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) { 4476 save_xmm_register(this, spill_offset, *it); 4477 spill_offset += xmm_save_size(); 4478 } 4479 } 4480 4481 void MacroAssembler::pop_set(XMMRegSet set, int offset) { 4482 int restore_size = set.size() * xmm_save_size(); 4483 assert(is_aligned(restore_size, StackAlignmentInBytes), "must be"); 4484 4485 int restore_offset = offset + restore_size - xmm_save_size(); 4486 4487 for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) { 4488 restore_xmm_register(this, restore_offset, *it); 4489 restore_offset -= xmm_save_size(); 4490 } 4491 } 4492 4493 void MacroAssembler::push_set(RegSet set, int offset) { 4494 int spill_offset; 4495 if (offset == -1) { 4496 int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size; 4497 int aligned_size = align_up(register_push_size, StackAlignmentInBytes); 4498 subptr(rsp, aligned_size); 4499 spill_offset = 0; 4500 } else { 4501 spill_offset = offset; 4502 } 4503 4504 for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) { 4505 movptr(Address(rsp, spill_offset), *it); 4506 spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size; 4507 } 4508 } 4509 4510 void MacroAssembler::pop_set(RegSet set, int offset) { 4511 4512 int gp_reg_size = Register::max_slots_per_register * 
VMRegImpl::stack_slot_size; 4513 int restore_size = set.size() * gp_reg_size; 4514 int aligned_size = align_up(restore_size, StackAlignmentInBytes); 4515 4516 int restore_offset; 4517 if (offset == -1) { 4518 restore_offset = restore_size - gp_reg_size; 4519 } else { 4520 restore_offset = offset + restore_size - gp_reg_size; 4521 } 4522 for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) { 4523 movptr(*it, Address(rsp, restore_offset)); 4524 restore_offset -= gp_reg_size; 4525 } 4526 4527 if (offset == -1) { 4528 addptr(rsp, aligned_size); 4529 } 4530 } 4531 4532 // Preserves the contents of address, destroys the contents length_in_bytes and temp. 4533 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) { 4534 assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different"); 4535 assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord"); 4536 Label done; 4537 4538 testptr(length_in_bytes, length_in_bytes); 4539 jcc(Assembler::zero, done); 4540 4541 // initialize topmost word, divide index by 2, check if odd and test if zero 4542 // note: for the remaining code to work, index must be a multiple of BytesPerWord 4543 #ifdef ASSERT 4544 { 4545 Label L; 4546 testptr(length_in_bytes, BytesPerWord - 1); 4547 jcc(Assembler::zero, L); 4548 stop("length must be a multiple of BytesPerWord"); 4549 bind(L); 4550 } 4551 #endif 4552 Register index = length_in_bytes; 4553 xorptr(temp, temp); // use _zero reg to clear memory (shorter code) 4554 if (UseIncDec) { 4555 shrptr(index, 3); // divide by 8/16 and set carry flag if bit 2 was set 4556 } else { 4557 shrptr(index, 2); // use 2 instructions to avoid partial flag stall 4558 shrptr(index, 1); 4559 } 4560 #ifndef _LP64 4561 // index could have not been a multiple of 8 (i.e., bit 2 was set) 4562 { 4563 Label even; 4564 // note: if index was a multiple of 8, then it cannot 4565 // be 0 now otherwise it must have been 0 before 4566 // => if it is even, we don't need to check for 0 again 4567 jcc(Assembler::carryClear, even); 4568 // clear topmost word (no jump would be needed if conditional assignment worked here) 4569 movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp); 4570 // index could be 0 now, must check again 4571 jcc(Assembler::zero, done); 4572 bind(even); 4573 } 4574 #endif // !_LP64 4575 // initialize remaining object fields: index is a multiple of 2 now 4576 { 4577 Label loop; 4578 bind(loop); 4579 movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp); 4580 NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);) 4581 decrement(index); 4582 jcc(Assembler::notZero, loop); 4583 } 4584 4585 bind(done); 4586 } 4587 4588 void MacroAssembler::get_inline_type_field_klass(Register holder_klass, Register index, Register inline_klass) { 4589 inline_layout_info(holder_klass, index, inline_klass); 4590 movptr(inline_klass, Address(inline_klass, InlineLayoutInfo::klass_offset())); 4591 } 4592 4593 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) { 4594 movptr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset())); 4595 #ifdef ASSERT 4596 { 4597 Label done; 4598 cmpptr(layout_info, 0); 4599 jcc(Assembler::notEqual, done); 4600 stop("inline_layout_info_array is null"); 4601 bind(done); 4602 } 
4603 #endif 4604 4605 InlineLayoutInfo array[2]; 4606 int size = (char*)&array[1] - (char*)&array[0]; // computing size of array elements 4607 if (is_power_of_2(size)) { 4608 shll(index, log2i_exact(size)); // Scale index by power of 2 4609 } else { 4610 imull(index, index, size); // Scale the index to be the entry index * array_element_size 4611 } 4612 lea(layout_info, Address(layout_info, index, Address::times_1, Array<InlineLayoutInfo>::base_offset_in_bytes())); 4613 } 4614 4615 void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) { 4616 #ifdef ASSERT 4617 { 4618 Label done_check; 4619 test_klass_is_inline_type(inline_klass, temp_reg, done_check); 4620 stop("get_default_value_oop from non inline type klass"); 4621 bind(done_check); 4622 } 4623 #endif 4624 Register offset = temp_reg; 4625 // Getting the offset of the pre-allocated default value 4626 movptr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset()))); 4627 movl(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset()))); 4628 4629 // Getting the mirror 4630 movptr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset()))); 4631 resolve_oop_handle(obj, inline_klass); 4632 4633 // Getting the pre-allocated default value from the mirror 4634 Address field(obj, offset, Address::times_1); 4635 load_heap_oop(obj, field); 4636 } 4637 4638 void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) { 4639 #ifdef ASSERT 4640 { 4641 Label done_check; 4642 test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check); 4643 stop("get_empty_value from non-empty inline klass"); 4644 bind(done_check); 4645 } 4646 #endif 4647 get_default_value_oop(inline_klass, temp_reg, obj); 4648 } 4649 4650 4651 // Look up the method for a megamorphic invokeinterface call. 4652 // The target method is determined by <intf_klass, itable_index>. 4653 // The receiver klass is in recv_klass. 4654 // On success, the result will be in method_result, and execution falls through. 4655 // On failure, execution transfers to the given label. 4656 void MacroAssembler::lookup_interface_method(Register recv_klass, 4657 Register intf_klass, 4658 RegisterOrConstant itable_index, 4659 Register method_result, 4660 Register scan_temp, 4661 Label& L_no_such_interface, 4662 bool return_method) { 4663 assert_different_registers(recv_klass, intf_klass, scan_temp); 4664 assert_different_registers(method_result, intf_klass, scan_temp); 4665 assert(recv_klass != method_result || !return_method, 4666 "recv_klass can be destroyed when method isn't needed"); 4667 4668 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 4669 "caller must use same register for non-constant itable index as for method"); 4670 4671 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 4672 int vtable_base = in_bytes(Klass::vtable_start_offset()); 4673 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 4674 int scan_step = itableOffsetEntry::size() * wordSize; 4675 int vte_size = vtableEntry::size_in_bytes(); 4676 Address::ScaleFactor times_vte_scale = Address::times_ptr; 4677 assert(vte_size == wordSize, "else adjust times_vte_scale"); 4678 4679 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 4680 4681 // Could store the aligned, prescaled offset in the klass. 
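// The lea below positions scan_temp at the first itableOffsetEntry, which
// follows the embedded vtable: vtable_length entries scaled by the vtable
// entry size, starting at vtable_base.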
4682 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base)); 4683 4684 if (return_method) { 4685 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 4686 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 4687 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off)); 4688 } 4689 4690 // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) { 4691 // if (scan->interface() == intf) { 4692 // result = (klass + scan->offset() + itable_index); 4693 // } 4694 // } 4695 Label search, found_method; 4696 4697 for (int peel = 1; peel >= 0; peel--) { 4698 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset())); 4699 cmpptr(intf_klass, method_result); 4700 4701 if (peel) { 4702 jccb(Assembler::equal, found_method); 4703 } else { 4704 jccb(Assembler::notEqual, search); 4705 // (invert the test to fall through to found_method...) 4706 } 4707 4708 if (!peel) break; 4709 4710 bind(search); 4711 4712 // Check that the previous entry is non-null. A null entry means that 4713 // the receiver class doesn't implement the interface, and wasn't the 4714 // same as when the caller was compiled. 4715 testptr(method_result, method_result); 4716 jcc(Assembler::zero, L_no_such_interface); 4717 addptr(scan_temp, scan_step); 4718 } 4719 4720 bind(found_method); 4721 4722 if (return_method) { 4723 // Got a hit. 4724 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset())); 4725 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1)); 4726 } 4727 } 4728 4729 // Look up the method for a megamorphic invokeinterface call in a single pass over itable: 4730 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData 4731 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index 4732 // The target method is determined by <holder_klass, itable_index>. 4733 // The receiver klass is in recv_klass. 4734 // On success, the result will be in method_result, and execution falls through. 4735 // On failure, execution transfers to the given label. 4736 void MacroAssembler::lookup_interface_method_stub(Register recv_klass, 4737 Register holder_klass, 4738 Register resolved_klass, 4739 Register method_result, 4740 Register scan_temp, 4741 Register temp_reg2, 4742 Register receiver, 4743 int itable_index, 4744 Label& L_no_such_interface) { 4745 assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver); 4746 Register temp_itbl_klass = method_result; 4747 Register temp_reg = (temp_reg2 == noreg ? 
recv_klass : temp_reg2); // reuse recv_klass register on 32-bit x86 impl 4748 4749 int vtable_base = in_bytes(Klass::vtable_start_offset()); 4750 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 4751 int scan_step = itableOffsetEntry::size() * wordSize; 4752 int vte_size = vtableEntry::size_in_bytes(); 4753 int ioffset = in_bytes(itableOffsetEntry::interface_offset()); 4754 int ooffset = in_bytes(itableOffsetEntry::offset_offset()); 4755 Address::ScaleFactor times_vte_scale = Address::times_ptr; 4756 assert(vte_size == wordSize, "adjust times_vte_scale"); 4757 4758 Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found; 4759 4760 // temp_itbl_klass = recv_klass.itable[0] 4761 // scan_temp = &recv_klass.itable[0] + step 4762 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 4763 movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset)); 4764 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step)); 4765 xorptr(temp_reg, temp_reg); 4766 4767 // Initial checks: 4768 // - if (holder_klass != resolved_klass), go to "scan for resolved" 4769 // - if (itable[0] == 0), no such interface 4770 // - if (itable[0] == holder_klass), shortcut to "holder found" 4771 cmpptr(holder_klass, resolved_klass); 4772 jccb(Assembler::notEqual, L_loop_scan_resolved_entry); 4773 testptr(temp_itbl_klass, temp_itbl_klass); 4774 jccb(Assembler::zero, L_no_such_interface); 4775 cmpptr(holder_klass, temp_itbl_klass); 4776 jccb(Assembler::equal, L_holder_found); 4777 4778 // Loop: Look for holder_klass record in itable 4779 // do { 4780 // tmp = itable[index]; 4781 // index += step; 4782 // if (tmp == holder_klass) { 4783 // goto L_holder_found; // Found! 4784 // } 4785 // } while (tmp != 0); 4786 // goto L_no_such_interface // Not found. 4787 Label L_scan_holder; 4788 bind(L_scan_holder); 4789 movptr(temp_itbl_klass, Address(scan_temp, 0)); 4790 addptr(scan_temp, scan_step); 4791 cmpptr(holder_klass, temp_itbl_klass); 4792 jccb(Assembler::equal, L_holder_found); 4793 testptr(temp_itbl_klass, temp_itbl_klass); 4794 jccb(Assembler::notZero, L_scan_holder); 4795 4796 jmpb(L_no_such_interface); 4797 4798 // Loop: Look for resolved_class record in itable 4799 // do { 4800 // tmp = itable[index]; 4801 // index += step; 4802 // if (tmp == holder_klass) { 4803 // // Also check if we have met a holder klass 4804 // holder_tmp = itable[index-step-ioffset]; 4805 // } 4806 // if (tmp == resolved_klass) { 4807 // goto L_resolved_found; // Found! 4808 // } 4809 // } while (tmp != 0); 4810 // goto L_no_such_interface // Not found. 4811 // 4812 Label L_loop_scan_resolved; 4813 bind(L_loop_scan_resolved); 4814 movptr(temp_itbl_klass, Address(scan_temp, 0)); 4815 addptr(scan_temp, scan_step); 4816 bind(L_loop_scan_resolved_entry); 4817 cmpptr(holder_klass, temp_itbl_klass); 4818 cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 4819 cmpptr(resolved_klass, temp_itbl_klass); 4820 jccb(Assembler::equal, L_resolved_found); 4821 testptr(temp_itbl_klass, temp_itbl_klass); 4822 jccb(Assembler::notZero, L_loop_scan_resolved); 4823 4824 jmpb(L_no_such_interface); 4825 4826 Label L_ready; 4827 4828 // See if we already have a holder klass. If not, go and scan for it. 
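// (temp_reg was zeroed before the scan and is only written by the cmov in
// the loop above once the holder entry has been seen, so zero here means
// the holder offset is still unknown.)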
4829 bind(L_resolved_found); 4830 testptr(temp_reg, temp_reg); 4831 jccb(Assembler::zero, L_scan_holder); 4832 jmpb(L_ready); 4833 4834 bind(L_holder_found); 4835 movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 4836 4837 // Finally, temp_reg contains holder_klass vtable offset 4838 bind(L_ready); 4839 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 4840 if (temp_reg2 == noreg) { // recv_klass register is clobbered for 32-bit x86 impl 4841 load_klass(scan_temp, receiver, noreg); 4842 movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 4843 } else { 4844 movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 4845 } 4846 } 4847 4848 4849 // virtual method calling 4850 void MacroAssembler::lookup_virtual_method(Register recv_klass, 4851 RegisterOrConstant vtable_index, 4852 Register method_result) { 4853 const ByteSize base = Klass::vtable_start_offset(); 4854 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below"); 4855 Address vtable_entry_addr(recv_klass, 4856 vtable_index, Address::times_ptr, 4857 base + vtableEntry::method_offset()); 4858 movptr(method_result, vtable_entry_addr); 4859 } 4860 4861 4862 void MacroAssembler::check_klass_subtype(Register sub_klass, 4863 Register super_klass, 4864 Register temp_reg, 4865 Label& L_success) { 4866 Label L_failure; 4867 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr); 4868 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr); 4869 bind(L_failure); 4870 } 4871 4872 4873 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 4874 Register super_klass, 4875 Register temp_reg, 4876 Label* L_success, 4877 Label* L_failure, 4878 Label* L_slow_path, 4879 RegisterOrConstant super_check_offset) { 4880 assert_different_registers(sub_klass, super_klass, temp_reg); 4881 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 4882 if (super_check_offset.is_register()) { 4883 assert_different_registers(sub_klass, super_klass, 4884 super_check_offset.as_register()); 4885 } else if (must_load_sco) { 4886 assert(temp_reg != noreg, "supply either a temp or a register offset"); 4887 } 4888 4889 Label L_fallthrough; 4890 int label_nulls = 0; 4891 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4892 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4893 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; } 4894 assert(label_nulls <= 1, "at most one null in the batch"); 4895 4896 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 4897 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 4898 Address super_check_offset_addr(super_klass, sco_offset); 4899 4900 // Hacked jcc, which "knows" that L_fallthrough, at least, is in 4901 // range of a jccb. If this routine grows larger, reconsider at 4902 // least some of these. 4903 #define local_jcc(assembler_cond, label) \ 4904 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \ 4905 else jcc( assembler_cond, label) /*omit semi*/ 4906 4907 // Hacked jmp, which may only be used just before L_fallthrough. 
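// (jccb/jmpb emit the two-byte short form with an 8-bit displacement, which
// is only safe when the target is known to lie within -128..+127 bytes.)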
4908 #define final_jmp(label) \ 4909 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 4910 else jmp(label) /*omit semi*/ 4911 4912 // If the pointers are equal, we are done (e.g., String[] elements). 4913 // This self-check enables sharing of secondary supertype arrays among 4914 // non-primary types such as array-of-interface. Otherwise, each such 4915 // type would need its own customized SSA. 4916 // We move this check to the front of the fast path because many 4917 // type checks are in fact trivially successful in this manner, 4918 // so we get a nicely predicted branch right at the start of the check. 4919 cmpptr(sub_klass, super_klass); 4920 local_jcc(Assembler::equal, *L_success); 4921 4922 // Check the supertype display: 4923 if (must_load_sco) { 4924 // Positive movl does right thing on LP64. 4925 movl(temp_reg, super_check_offset_addr); 4926 super_check_offset = RegisterOrConstant(temp_reg); 4927 } 4928 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0); 4929 cmpptr(super_klass, super_check_addr); // load displayed supertype 4930 4931 // This check has worked decisively for primary supers. 4932 // Secondary supers are sought in the super_cache ('super_cache_addr'). 4933 // (Secondary supers are interfaces and very deeply nested subtypes.) 4934 // This works in the same check above because of a tricky aliasing 4935 // between the super_cache and the primary super display elements. 4936 // (The 'super_check_addr' can address either, as the case requires.) 4937 // Note that the cache is updated below if it does not help us find 4938 // what we need immediately. 4939 // So if it was a primary super, we can just fail immediately. 4940 // Otherwise, it's the slow path for us (no success at this point). 4941 4942 if (super_check_offset.is_register()) { 4943 local_jcc(Assembler::equal, *L_success); 4944 cmpl(super_check_offset.as_register(), sc_offset); 4945 if (L_failure == &L_fallthrough) { 4946 local_jcc(Assembler::equal, *L_slow_path); 4947 } else { 4948 local_jcc(Assembler::notEqual, *L_failure); 4949 final_jmp(*L_slow_path); 4950 } 4951 } else if (super_check_offset.as_constant() == sc_offset) { 4952 // Need a slow path; fast failure is impossible. 4953 if (L_slow_path == &L_fallthrough) { 4954 local_jcc(Assembler::equal, *L_success); 4955 } else { 4956 local_jcc(Assembler::notEqual, *L_slow_path); 4957 final_jmp(*L_success); 4958 } 4959 } else { 4960 // No slow path; it's a fast decision. 
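// A constant super_check_offset other than the secondary-super-cache slot
// means super_klass is a primary supertype at a known depth, so the single
// display-entry comparison above decides the subtype check outright.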
4961 if (L_failure == &L_fallthrough) { 4962 local_jcc(Assembler::equal, *L_success); 4963 } else { 4964 local_jcc(Assembler::notEqual, *L_failure); 4965 final_jmp(*L_success); 4966 } 4967 } 4968 4969 bind(L_fallthrough); 4970 4971 #undef local_jcc 4972 #undef final_jmp 4973 } 4974 4975 4976 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass, 4977 Register super_klass, 4978 Register temp_reg, 4979 Register temp2_reg, 4980 Label* L_success, 4981 Label* L_failure, 4982 bool set_cond_codes) { 4983 assert_different_registers(sub_klass, super_klass, temp_reg); 4984 if (temp2_reg != noreg) 4985 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg); 4986 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 4987 4988 Label L_fallthrough; 4989 int label_nulls = 0; 4990 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4991 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4992 assert(label_nulls <= 1, "at most one null in the batch"); 4993 4994 // a couple of useful fields in sub_klass: 4995 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 4996 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 4997 Address secondary_supers_addr(sub_klass, ss_offset); 4998 Address super_cache_addr( sub_klass, sc_offset); 4999 5000 // Do a linear scan of the secondary super-klass chain. 5001 // This code is rarely used, so simplicity is a virtue here. 5002 // The repne_scan instruction uses fixed registers, which we must spill. 5003 // Don't worry too much about pre-existing connections with the input regs. 5004 5005 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super) 5006 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter) 5007 5008 // Get super_klass value into rax (even if it was in rdi or rcx). 5009 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false; 5010 if (super_klass != rax) { 5011 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; } 5012 mov(rax, super_klass); 5013 } 5014 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; } 5015 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; } 5016 5017 #ifndef PRODUCT 5018 uint* pst_counter = &SharedRuntime::_partial_subtype_ctr; 5019 ExternalAddress pst_counter_addr((address) pst_counter); 5020 NOT_LP64( incrementl(pst_counter_addr) ); 5021 LP64_ONLY( lea(rcx, pst_counter_addr) ); 5022 LP64_ONLY( incrementl(Address(rcx, 0)) ); 5023 #endif //PRODUCT 5024 5025 // We will consult the secondary-super array. 5026 movptr(rdi, secondary_supers_addr); 5027 // Load the array length. (Positive movl does right thing on LP64.) 5028 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes())); 5029 // Skip to start of data. 5030 addptr(rdi, Array<Klass*>::base_offset_in_bytes()); 5031 5032 // Scan RCX words at [RDI] for an occurrence of RAX. 5033 // Set NZ/Z based on last compare. 5034 // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does 5035 // not change flags (only scas instruction which is repeated sets flags). 5036 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found. 5037 5038 testptr(rax,rax); // Set Z = 0 5039 repne_scan(); 5040 5041 // Unspill the temp. registers: 5042 if (pushed_rdi) pop(rdi); 5043 if (pushed_rcx) pop(rcx); 5044 if (pushed_rax) pop(rax); 5045 5046 if (set_cond_codes) { 5047 // Special hack for the AD files: rdi is guaranteed non-zero. 
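// (repne_scan advances rdi past each element it examines, and the scan
//  starts at the non-null secondary-supers data pointer, so rdi cannot be
//  zero here.)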
5048 assert(!pushed_rdi, "rdi must be left non-null"); 5049 // Also, the condition codes are properly set Z/NZ on succeed/failure. 5050 } 5051 5052 if (L_failure == &L_fallthrough) 5053 jccb(Assembler::notEqual, *L_failure); 5054 else jcc(Assembler::notEqual, *L_failure); 5055 5056 // Success. Cache the super we found and proceed in triumph. 5057 movptr(super_cache_addr, super_klass); 5058 5059 if (L_success != &L_fallthrough) { 5060 jmp(*L_success); 5061 } 5062 5063 #undef IS_A_TEMP 5064 5065 bind(L_fallthrough); 5066 } 5067 5068 #ifndef _LP64 5069 5070 // 32-bit x86 only: always use the linear search. 5071 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 5072 Register super_klass, 5073 Register temp_reg, 5074 Register temp2_reg, 5075 Label* L_success, 5076 Label* L_failure, 5077 bool set_cond_codes) { 5078 check_klass_subtype_slow_path_linear 5079 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, set_cond_codes); 5080 } 5081 5082 #else // _LP64 5083 5084 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 5085 Register super_klass, 5086 Register temp_reg, 5087 Register temp2_reg, 5088 Label* L_success, 5089 Label* L_failure, 5090 bool set_cond_codes) { 5091 assert(set_cond_codes == false, "must be false on 64-bit x86"); 5092 check_klass_subtype_slow_path 5093 (sub_klass, super_klass, temp_reg, temp2_reg, noreg, noreg, 5094 L_success, L_failure); 5095 } 5096 5097 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 5098 Register super_klass, 5099 Register temp_reg, 5100 Register temp2_reg, 5101 Register temp3_reg, 5102 Register temp4_reg, 5103 Label* L_success, 5104 Label* L_failure) { 5105 if (UseSecondarySupersTable) { 5106 check_klass_subtype_slow_path_table 5107 (sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, temp4_reg, 5108 L_success, L_failure); 5109 } else { 5110 check_klass_subtype_slow_path_linear 5111 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, /*set_cond_codes*/false); 5112 } 5113 } 5114 5115 Register MacroAssembler::allocate_if_noreg(Register r, 5116 RegSetIterator<Register> &available_regs, 5117 RegSet ®s_to_push) { 5118 if (!r->is_valid()) { 5119 r = *available_regs++; 5120 regs_to_push += r; 5121 } 5122 return r; 5123 } 5124 5125 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass, 5126 Register super_klass, 5127 Register temp_reg, 5128 Register temp2_reg, 5129 Register temp3_reg, 5130 Register result_reg, 5131 Label* L_success, 5132 Label* L_failure) { 5133 // NB! Callers may assume that, when temp2_reg is a valid register, 5134 // this code sets it to a nonzero value. 
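// (That guarantee is re-established by the movq(temp2_reg, 1) below, since
//  the table lookup may clobber temp2_reg.)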
5135 bool temp2_reg_was_valid = temp2_reg->is_valid(); 5136 5137 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg); 5138 5139 Label L_fallthrough; 5140 int label_nulls = 0; 5141 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 5142 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 5143 assert(label_nulls <= 1, "at most one null in the batch"); 5144 5145 BLOCK_COMMENT("check_klass_subtype_slow_path_table"); 5146 5147 RegSetIterator<Register> available_regs 5148 = (RegSet::of(rax, rcx, rdx, r8) + r9 + r10 + r11 + r12 - temps - sub_klass - super_klass).begin(); 5149 5150 RegSet pushed_regs; 5151 5152 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs); 5153 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs); 5154 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs); 5155 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs); 5156 Register temp4_reg = allocate_if_noreg(noreg, available_regs, pushed_regs); 5157 5158 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, result_reg); 5159 5160 { 5161 5162 int register_push_size = pushed_regs.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size; 5163 int aligned_size = align_up(register_push_size, StackAlignmentInBytes); 5164 subptr(rsp, aligned_size); 5165 push_set(pushed_regs, 0); 5166 5167 lookup_secondary_supers_table_var(sub_klass, 5168 super_klass, 5169 temp_reg, temp2_reg, temp3_reg, temp4_reg, result_reg); 5170 cmpq(result_reg, 0); 5171 5172 // Unspill the temp. registers: 5173 pop_set(pushed_regs, 0); 5174 // Increment SP but do not clobber flags. 5175 lea(rsp, Address(rsp, aligned_size)); 5176 } 5177 5178 if (temp2_reg_was_valid) { 5179 movq(temp2_reg, 1); 5180 } 5181 5182 jcc(Assembler::notEqual, *L_failure); 5183 5184 if (L_success != &L_fallthrough) { 5185 jmp(*L_success); 5186 } 5187 5188 bind(L_fallthrough); 5189 } 5190 5191 // population_count variant for running without the POPCNT 5192 // instruction, which was introduced with SSE4.2 in 2008. 5193 void MacroAssembler::population_count(Register dst, Register src, 5194 Register scratch1, Register scratch2) { 5195 assert_different_registers(src, scratch1, scratch2); 5196 if (UsePopCountInstruction) { 5197 Assembler::popcntq(dst, src); 5198 } else { 5199 assert_different_registers(src, scratch1, scratch2); 5200 assert_different_registers(dst, scratch1, scratch2); 5201 Label loop, done; 5202 5203 mov(scratch1, src); 5204 // dst = 0; 5205 // while(scratch1 != 0) { 5206 // dst++; 5207 // scratch1 &= (scratch1 - 1); 5208 // } 5209 xorl(dst, dst); 5210 testq(scratch1, scratch1); 5211 jccb(Assembler::equal, done); 5212 { 5213 bind(loop); 5214 incq(dst); 5215 movq(scratch2, scratch1); 5216 decq(scratch2); 5217 andq(scratch1, scratch2); 5218 jccb(Assembler::notEqual, loop); 5219 } 5220 bind(done); 5221 } 5222 } 5223 5224 // Ensure that the inline code and the stub are using the same registers. 
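// (The slow-path stub is generated once with a fixed register assignment;
//  the macro below asserts that every user agrees with it.)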
5225 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \
5226 do { \
5227 assert(r_super_klass == rax, "mismatch"); \
5228 assert(r_array_base == rbx, "mismatch"); \
5229 assert(r_array_length == rcx, "mismatch"); \
5230 assert(r_array_index == rdx, "mismatch"); \
5231 assert(r_sub_klass == rsi || r_sub_klass == noreg, "mismatch"); \
5232 assert(r_bitmap == r11 || r_bitmap == noreg, "mismatch"); \
5233 assert(result == rdi || result == noreg, "mismatch"); \
5234 } while(0)
5235
5236 // Versions of salq and rorq that don't need count to be in rcx
5237
5238 void MacroAssembler::salq(Register dest, Register count) {
5239 if (count == rcx) {
5240 Assembler::salq(dest);
5241 } else {
5242 assert_different_registers(rcx, dest);
5243 xchgq(rcx, count);
5244 Assembler::salq(dest);
5245 xchgq(rcx, count);
5246 }
5247 }
5248
5249 void MacroAssembler::rorq(Register dest, Register count) {
5250 if (count == rcx) {
5251 Assembler::rorq(dest);
5252 } else {
5253 assert_different_registers(rcx, dest);
5254 xchgq(rcx, count);
5255 Assembler::rorq(dest);
5256 xchgq(rcx, count);
5257 }
5258 }
5259
5260
5261
5262 // At runtime, return 0 in result if r_super_klass is a superclass of
5263 // r_sub_klass, otherwise return nonzero. Use this if you know the
5264 // super_klass_slot of the class you're looking for. This is always
5265 // the case for instanceof and checkcast.
5266 void MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass,
5267 Register r_super_klass,
5268 Register temp1,
5269 Register temp2,
5270 Register temp3,
5271 Register temp4,
5272 Register result,
5273 u1 super_klass_slot) {
5274 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);
5275
5276 Label L_fallthrough, L_success, L_failure;
5277
5278 BLOCK_COMMENT("lookup_secondary_supers_table {");
5279
5280 const Register
5281 r_array_index = temp1,
5282 r_array_length = temp2,
5283 r_array_base = temp3,
5284 r_bitmap = temp4;
5285
5286 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
5287
5288 xorq(result, result); // = 0
5289
5290 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
5291 movq(r_array_index, r_bitmap);
5292
5293 // First check the bitmap to see if super_klass might be present. If
5294 // the bit is zero, we are certain that super_klass is not one of
5295 // the secondary supers.
5296 u1 bit = super_klass_slot;
5297 {
5298 // NB: If the count in an x86 shift instruction is 0, the flags are
5299 // not affected, so we do a testq instead.
5300 int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit;
5301 if (shift_count != 0) {
5302 salq(r_array_index, shift_count);
5303 } else {
5304 testq(r_array_index, r_array_index);
5305 }
5306 }
5307 // We test the MSB of r_array_index, i.e. its sign bit
5308 jcc(Assembler::positive, L_failure);
5309
5310 // Get the first array index that can contain super_klass into r_array_index.
5311 if (bit != 0) {
5312 population_count(r_array_index, r_array_index, temp2, temp3);
5313 } else {
5314 movl(r_array_index, 1);
5315 }
5316 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
5317
5318 // We will consult the secondary-super array.
5319 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
5320
5321 // We're asserting that the first word in an Array<Klass*> is the
5322 // length, and the second word is the first word of the data.
If 5323 // that ever changes, r_array_base will have to be adjusted here. 5324 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 5325 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 5326 5327 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 5328 jccb(Assembler::equal, L_success); 5329 5330 // Is there another entry to check? Consult the bitmap. 5331 btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK); 5332 jccb(Assembler::carryClear, L_failure); 5333 5334 // Linear probe. Rotate the bitmap so that the next bit to test is 5335 // in Bit 1. 5336 if (bit != 0) { 5337 rorq(r_bitmap, bit); 5338 } 5339 5340 // Calls into the stub generated by lookup_secondary_supers_table_slow_path. 5341 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap. 5342 // Kills: r_array_length. 5343 // Returns: result. 5344 call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub())); 5345 // Result (0/1) is in rdi 5346 jmpb(L_fallthrough); 5347 5348 bind(L_failure); 5349 incq(result); // 0 => 1 5350 5351 bind(L_success); 5352 // result = 0; 5353 5354 bind(L_fallthrough); 5355 BLOCK_COMMENT("} lookup_secondary_supers_table"); 5356 5357 if (VerifySecondarySupers) { 5358 verify_secondary_supers_table(r_sub_klass, r_super_klass, result, 5359 temp1, temp2, temp3); 5360 } 5361 } 5362 5363 // At runtime, return 0 in result if r_super_klass is a superclass of 5364 // r_sub_klass, otherwise return nonzero. Use this version of 5365 // lookup_secondary_supers_table() if you don't know ahead of time 5366 // which superclass will be searched for. Used by interpreter and 5367 // runtime stubs. It is larger and has somewhat greater latency than 5368 // the version above, which takes a constant super_klass_slot. 5369 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass, 5370 Register r_super_klass, 5371 Register temp1, 5372 Register temp2, 5373 Register temp3, 5374 Register temp4, 5375 Register result) { 5376 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result); 5377 assert_different_registers(r_sub_klass, r_super_klass, rcx); 5378 RegSet temps = RegSet::of(temp1, temp2, temp3, temp4); 5379 5380 Label L_fallthrough, L_success, L_failure; 5381 5382 BLOCK_COMMENT("lookup_secondary_supers_table {"); 5383 5384 RegSetIterator<Register> available_regs = (temps - rcx).begin(); 5385 5386 // FIXME. Once we are sure that all paths reaching this point really 5387 // do pass rcx as one of our temps we can get rid of the following 5388 // workaround. 5389 assert(temps.contains(rcx), "fix this code"); 5390 5391 // We prefer to have our shift count in rcx. If rcx is one of our 5392 // temps, use it for slot. If not, pick any of our temps. 5393 Register slot; 5394 if (!temps.contains(rcx)) { 5395 slot = *available_regs++; 5396 } else { 5397 slot = rcx; 5398 } 5399 5400 const Register r_array_index = *available_regs++; 5401 const Register r_bitmap = *available_regs++; 5402 5403 // The logic above guarantees this property, but we state it here. 5404 assert_different_registers(r_array_index, r_bitmap, rcx); 5405 5406 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 5407 movq(r_array_index, r_bitmap); 5408 5409 // First check the bitmap to see if super_klass might be present. If 5410 // the bit is zero, we are certain that super_klass is not one of 5411 // the secondary supers. 
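// Unlike the constant-slot variant above, the slot is only known at
// runtime, so load it from the super class and derive the shift count
// from it.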
5412 movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
5413 xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64)
5414 salq(r_array_index, slot);
5415
5416 testq(r_array_index, r_array_index);
5417 // We test the MSB of r_array_index, i.e. its sign bit
5418 jcc(Assembler::positive, L_failure);
5419
5420 const Register r_array_base = *available_regs++;
5421
5422 // Get the first array index that can contain super_klass into r_array_index.
5423 population_count(r_array_index, r_array_index, /*temp2*/r_array_base, /*temp3*/slot);
5424
5425 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
5426
5427 // We will consult the secondary-super array.
5428 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
5429
5430 // We're asserting that the first word in an Array<Klass*> is the
5431 // length, and the second word is the first word of the data. If
5432 // that ever changes, r_array_base will have to be adjusted here.
5433 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
5434 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
5435
5436 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
5437 jccb(Assembler::equal, L_success);
5438
5439 // Restore slot to its true value
5440 xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64)
5441
5442 // Linear probe. Rotate the bitmap so that the next bit to test is
5443 // in Bit 1.
5444 rorq(r_bitmap, slot);
5445
5446 // Is there another entry to check? Consult the bitmap.
5447 btq(r_bitmap, 1);
5448 jccb(Assembler::carryClear, L_failure);
5449
5450 // Emit the slow path inline via lookup_secondary_supers_table_slow_path
5451 // (the constant-slot variant above reaches the same code through a stub).
5452 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
5453 // Kills result and slot; branches to L_success on a hit, else falls through to L_failure.
5454 lookup_secondary_supers_table_slow_path(r_super_klass,
5455 r_array_base,
5456 r_array_index,
5457 r_bitmap,
5458 /*temp1*/result,
5459 /*temp2*/slot,
5460 &L_success,
5461 nullptr);
5462
5463 bind(L_failure);
5464 movq(result, 1);
5465 jmpb(L_fallthrough);
5466
5467 bind(L_success);
5468 xorq(result, result); // = 0
5469
5470 bind(L_fallthrough);
5471 BLOCK_COMMENT("} lookup_secondary_supers_table");
5472
5473 if (VerifySecondarySupers) {
5474 verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
5475 temp1, temp2, temp3);
5476 }
5477 }
5478
5479 void MacroAssembler::repne_scanq(Register addr, Register value, Register count, Register limit,
5480 Label* L_success, Label* L_failure) {
5481 Label L_loop, L_fallthrough;
5482 {
5483 int label_nulls = 0;
5484 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
5485 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
5486 assert(label_nulls <= 1, "at most one null in the batch");
5487 }
5488 bind(L_loop);
5489 cmpq(value, Address(addr, count, Address::times_8));
5490 jcc(Assembler::equal, *L_success);
5491 addl(count, 1);
5492 cmpl(count, limit);
5493 jcc(Assembler::less, L_loop);
5494
5495 if (&L_fallthrough != L_failure) {
5496 jmp(*L_failure);
5497 }
5498 bind(L_fallthrough);
5499 }
5500
5501 // Called by code generated by check_klass_subtype_slow_path
5502 // above. This is called when there is a collision in the hashed
5503 // lookup in the secondary supers array.
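// A rough C++ sketch of the probe loop emitted below (illustrative names,
// not HotSpot API):
//
//   for (;;) {
//     if (index >= length) index = 0;              // wrap around
//     if (table[index] == super) return found;     // hit
//     if (!test_bit(bitmap, 2)) return not_found;  // next slot is empty
//     bitmap = rotate_right(bitmap, 1);            // advance look-ahead bit
//     index++;
//   }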
5504 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass, 5505 Register r_array_base, 5506 Register r_array_index, 5507 Register r_bitmap, 5508 Register temp1, 5509 Register temp2, 5510 Label* L_success, 5511 Label* L_failure) { 5512 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2); 5513 5514 const Register 5515 r_array_length = temp1, 5516 r_sub_klass = noreg, 5517 result = noreg; 5518 5519 Label L_fallthrough; 5520 int label_nulls = 0; 5521 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 5522 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 5523 assert(label_nulls <= 1, "at most one null in the batch"); 5524 5525 // Load the array length. 5526 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 5527 // And adjust the array base to point to the data. 5528 // NB! Effectively increments current slot index by 1. 5529 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, ""); 5530 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes()); 5531 5532 // Linear probe 5533 Label L_huge; 5534 5535 // The bitmap is full to bursting. 5536 // Implicit invariant: BITMAP_FULL implies (length > 0) 5537 cmpl(r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2); 5538 jcc(Assembler::greater, L_huge); 5539 5540 // NB! Our caller has checked bits 0 and 1 in the bitmap. The 5541 // current slot (at secondary_supers[r_array_index]) has not yet 5542 // been inspected, and r_array_index may be out of bounds if we 5543 // wrapped around the end of the array. 5544 5545 { // This is conventional linear probing, but instead of terminating 5546 // when a null entry is found in the table, we maintain a bitmap 5547 // in which a 0 indicates missing entries. 5548 // The check above guarantees there are 0s in the bitmap, so the loop 5549 // eventually terminates. 5550 5551 xorl(temp2, temp2); // = 0; 5552 5553 Label L_again; 5554 bind(L_again); 5555 5556 // Check for array wraparound. 5557 cmpl(r_array_index, r_array_length); 5558 cmovl(Assembler::greaterEqual, r_array_index, temp2); 5559 5560 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 5561 jcc(Assembler::equal, *L_success); 5562 5563 // If the next bit in bitmap is zero, we're done. 5564 btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now 5565 jcc(Assembler::carryClear, *L_failure); 5566 5567 rorq(r_bitmap, 1); // Bits 1/2 => 0/1 5568 addl(r_array_index, 1); 5569 5570 jmp(L_again); 5571 } 5572 5573 { // Degenerate case: more than 64 secondary supers. 5574 // FIXME: We could do something smarter here, maybe a vectorized 5575 // comparison or a binary search, but is that worth any added 5576 // complexity? 5577 bind(L_huge); 5578 xorl(r_array_index, r_array_index); // = 0 5579 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, 5580 L_success, 5581 (&L_fallthrough != L_failure ? L_failure : nullptr)); 5582 5583 bind(L_fallthrough); 5584 } 5585 } 5586 5587 struct VerifyHelperArguments { 5588 Klass* _super; 5589 Klass* _sub; 5590 intptr_t _linear_result; 5591 intptr_t _table_result; 5592 }; 5593 5594 static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) { 5595 Klass::on_secondary_supers_verification_failure(args->_super, 5596 args->_sub, 5597 args->_linear_result, 5598 args->_table_result, 5599 msg); 5600 } 5601 5602 // Make sure that the hashed lookup and a linear scan agree. 
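// (Debug-only cross-check, emitted when VerifySecondarySupers is set; on a
//  mismatch it builds an argument record on the stack and calls a fatal
//  helper.)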
5603 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass, 5604 Register r_super_klass, 5605 Register result, 5606 Register temp1, 5607 Register temp2, 5608 Register temp3) { 5609 const Register 5610 r_array_index = temp1, 5611 r_array_length = temp2, 5612 r_array_base = temp3, 5613 r_bitmap = noreg; 5614 5615 BLOCK_COMMENT("verify_secondary_supers_table {"); 5616 5617 Label L_success, L_failure, L_check, L_done; 5618 5619 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 5620 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 5621 // And adjust the array base to point to the data. 5622 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes()); 5623 5624 testl(r_array_length, r_array_length); // array_length == 0? 5625 jcc(Assembler::zero, L_failure); 5626 5627 movl(r_array_index, 0); 5628 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success); 5629 // fall through to L_failure 5630 5631 const Register linear_result = r_array_index; // reuse temp1 5632 5633 bind(L_failure); // not present 5634 movl(linear_result, 1); 5635 jmp(L_check); 5636 5637 bind(L_success); // present 5638 movl(linear_result, 0); 5639 5640 bind(L_check); 5641 cmpl(linear_result, result); 5642 jcc(Assembler::equal, L_done); 5643 5644 { // To avoid calling convention issues, build a record on the stack 5645 // and pass the pointer to that instead. 5646 push(result); 5647 push(linear_result); 5648 push(r_sub_klass); 5649 push(r_super_klass); 5650 movptr(c_rarg1, rsp); 5651 movptr(c_rarg0, (uintptr_t) "mismatch"); 5652 call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper))); 5653 should_not_reach_here(); 5654 } 5655 bind(L_done); 5656 5657 BLOCK_COMMENT("} verify_secondary_supers_table"); 5658 } 5659 5660 #undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS 5661 5662 #endif // LP64 5663 5664 void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) { 5665 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 5666 5667 Label L_fallthrough; 5668 if (L_fast_path == nullptr) { 5669 L_fast_path = &L_fallthrough; 5670 } else if (L_slow_path == nullptr) { 5671 L_slow_path = &L_fallthrough; 5672 } 5673 5674 // Fast path check: class is fully initialized. 5675 // init_state needs acquire, but x86 is TSO, and so we are already good. 
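// (If the class is still being initialized, we may also proceed when the
//  current thread is the initializer thread; that is the second fast-path
//  check below.)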
5676 cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); 5677 jcc(Assembler::equal, *L_fast_path); 5678 5679 // Fast path check: current thread is initializer thread 5680 cmpptr(thread, Address(klass, InstanceKlass::init_thread_offset())); 5681 if (L_slow_path == &L_fallthrough) { 5682 jcc(Assembler::equal, *L_fast_path); 5683 bind(*L_slow_path); 5684 } else if (L_fast_path == &L_fallthrough) { 5685 jcc(Assembler::notEqual, *L_slow_path); 5686 bind(*L_fast_path); 5687 } else { 5688 Unimplemented(); 5689 } 5690 } 5691 5692 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) { 5693 if (VM_Version::supports_cmov()) { 5694 cmovl(cc, dst, src); 5695 } else { 5696 Label L; 5697 jccb(negate_condition(cc), L); 5698 movl(dst, src); 5699 bind(L); 5700 } 5701 } 5702 5703 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) { 5704 if (VM_Version::supports_cmov()) { 5705 cmovl(cc, dst, src); 5706 } else { 5707 Label L; 5708 jccb(negate_condition(cc), L); 5709 movl(dst, src); 5710 bind(L); 5711 } 5712 } 5713 5714 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 5715 if (!VerifyOops || VerifyAdapterSharing) { 5716 // Below address of the code string confuses VerifyAdapterSharing 5717 // because it may differ between otherwise equivalent adapters. 5718 return; 5719 } 5720 5721 BLOCK_COMMENT("verify_oop {"); 5722 #ifdef _LP64 5723 push(rscratch1); 5724 #endif 5725 push(rax); // save rax 5726 push(reg); // pass register argument 5727 5728 // Pass register number to verify_oop_subroutine 5729 const char* b = nullptr; 5730 { 5731 ResourceMark rm; 5732 stringStream ss; 5733 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 5734 b = code_string(ss.as_string()); 5735 } 5736 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate()); 5737 pushptr(buffer.addr(), rscratch1); 5738 5739 // call indirectly to solve generation ordering problem 5740 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 5741 call(rax); 5742 // Caller pops the arguments (oop, message) and restores rax, r10 5743 BLOCK_COMMENT("} verify_oop"); 5744 } 5745 5746 void MacroAssembler::vallones(XMMRegister dst, int vector_len) { 5747 if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) { 5748 // Only pcmpeq has dependency breaking treatment (i.e the execution can begin without 5749 // waiting for the previous result on dst), not vpcmpeqd, so just use vpternlog 5750 vpternlogd(dst, 0xFF, dst, dst, vector_len); 5751 } else if (VM_Version::supports_avx()) { 5752 vpcmpeqd(dst, dst, dst, vector_len); 5753 } else { 5754 assert(VM_Version::supports_sse2(), ""); 5755 pcmpeqd(dst, dst); 5756 } 5757 } 5758 5759 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 5760 int extra_slot_offset) { 5761 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
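// Compute the address of an interpreter stack slot relative to rsp.
// The extra wordSize added below accounts for the return PC that the
// call pushed on top of the arguments.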
5762 int stackElementSize = Interpreter::stackElementSize;
5763 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
5764 #ifdef ASSERT
5765 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
5766 assert(offset1 - offset == stackElementSize, "correct arithmetic");
5767 #endif
5768 Register scale_reg = noreg;
5769 Address::ScaleFactor scale_factor = Address::no_scale;
5770 if (arg_slot.is_constant()) {
5771 offset += arg_slot.as_constant() * stackElementSize;
5772 } else {
5773 scale_reg = arg_slot.as_register();
5774 scale_factor = Address::times(stackElementSize);
5775 }
5776 offset += wordSize; // return PC is on stack
5777 return Address(rsp, scale_reg, scale_factor, offset);
5778 }
5779
5780 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
5781 if (!VerifyOops || VerifyAdapterSharing) {
5782 // Below address of the code string confuses VerifyAdapterSharing
5783 // because it may differ between otherwise equivalent adapters.
5784 return;
5785 }
5786
5787 #ifdef _LP64
5788 push(rscratch1);
5789 #endif
5790 push(rax); // save rax
5791 // addr may contain rsp, so we have to adjust it for the push(es)
5792 // we just did (on 64-bit we do two pushes).
5793 // NOTE: the 64-bit code used to do movq(addr, rax), which stored rax
5794 // into addr, the reverse of what was intended.
5795 if (addr.uses(rsp)) {
5796 lea(rax, addr);
5797 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
5798 } else {
5799 pushptr(addr);
5800 }
5801
5802 // Pass register number to verify_oop_subroutine
5803 const char* b = nullptr;
5804 {
5805 ResourceMark rm;
5806 stringStream ss;
5807 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
5808 b = code_string(ss.as_string());
5809 }
5810 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate());
5811 pushptr(buffer.addr(), rscratch1);
5812
5813 // call indirectly to solve generation ordering problem
5814 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
5815 call(rax);
5816 // Caller pops the arguments (addr, message) and restores rax, r10.
5817 } 5818 5819 void MacroAssembler::verify_tlab() { 5820 #ifdef ASSERT 5821 if (UseTLAB && VerifyOops) { 5822 Label next, ok; 5823 Register t1 = rsi; 5824 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread); 5825 5826 push(t1); 5827 NOT_LP64(push(thread_reg)); 5828 NOT_LP64(get_thread(thread_reg)); 5829 5830 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 5831 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); 5832 jcc(Assembler::aboveEqual, next); 5833 STOP("assert(top >= start)"); 5834 should_not_reach_here(); 5835 5836 bind(next); 5837 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); 5838 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 5839 jcc(Assembler::aboveEqual, ok); 5840 STOP("assert(top <= end)"); 5841 should_not_reach_here(); 5842 5843 bind(ok); 5844 NOT_LP64(pop(thread_reg)); 5845 pop(t1); 5846 } 5847 #endif 5848 } 5849 5850 class ControlWord { 5851 public: 5852 int32_t _value; 5853 5854 int rounding_control() const { return (_value >> 10) & 3 ; } 5855 int precision_control() const { return (_value >> 8) & 3 ; } 5856 bool precision() const { return ((_value >> 5) & 1) != 0; } 5857 bool underflow() const { return ((_value >> 4) & 1) != 0; } 5858 bool overflow() const { return ((_value >> 3) & 1) != 0; } 5859 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 5860 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 5861 bool invalid() const { return ((_value >> 0) & 1) != 0; } 5862 5863 void print() const { 5864 // rounding control 5865 const char* rc; 5866 switch (rounding_control()) { 5867 case 0: rc = "round near"; break; 5868 case 1: rc = "round down"; break; 5869 case 2: rc = "round up "; break; 5870 case 3: rc = "chop "; break; 5871 default: 5872 rc = nullptr; // silence compiler warnings 5873 fatal("Unknown rounding control: %d", rounding_control()); 5874 }; 5875 // precision control 5876 const char* pc; 5877 switch (precision_control()) { 5878 case 0: pc = "24 bits "; break; 5879 case 1: pc = "reserved"; break; 5880 case 2: pc = "53 bits "; break; 5881 case 3: pc = "64 bits "; break; 5882 default: 5883 pc = nullptr; // silence compiler warnings 5884 fatal("Unknown precision control: %d", precision_control()); 5885 }; 5886 // flags 5887 char f[9]; 5888 f[0] = ' '; 5889 f[1] = ' '; 5890 f[2] = (precision ()) ? 'P' : 'p'; 5891 f[3] = (underflow ()) ? 'U' : 'u'; 5892 f[4] = (overflow ()) ? 'O' : 'o'; 5893 f[5] = (zero_divide ()) ? 'Z' : 'z'; 5894 f[6] = (denormalized()) ? 'D' : 'd'; 5895 f[7] = (invalid ()) ? 
'I' : 'i'; 5896 f[8] = '\x0'; 5897 // output 5898 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc); 5899 } 5900 5901 }; 5902 5903 class StatusWord { 5904 public: 5905 int32_t _value; 5906 5907 bool busy() const { return ((_value >> 15) & 1) != 0; } 5908 bool C3() const { return ((_value >> 14) & 1) != 0; } 5909 bool C2() const { return ((_value >> 10) & 1) != 0; } 5910 bool C1() const { return ((_value >> 9) & 1) != 0; } 5911 bool C0() const { return ((_value >> 8) & 1) != 0; } 5912 int top() const { return (_value >> 11) & 7 ; } 5913 bool error_status() const { return ((_value >> 7) & 1) != 0; } 5914 bool stack_fault() const { return ((_value >> 6) & 1) != 0; } 5915 bool precision() const { return ((_value >> 5) & 1) != 0; } 5916 bool underflow() const { return ((_value >> 4) & 1) != 0; } 5917 bool overflow() const { return ((_value >> 3) & 1) != 0; } 5918 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 5919 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 5920 bool invalid() const { return ((_value >> 0) & 1) != 0; } 5921 5922 void print() const { 5923 // condition codes 5924 char c[5]; 5925 c[0] = (C3()) ? '3' : '-'; 5926 c[1] = (C2()) ? '2' : '-'; 5927 c[2] = (C1()) ? '1' : '-'; 5928 c[3] = (C0()) ? '0' : '-'; 5929 c[4] = '\x0'; 5930 // flags 5931 char f[9]; 5932 f[0] = (error_status()) ? 'E' : '-'; 5933 f[1] = (stack_fault ()) ? 'S' : '-'; 5934 f[2] = (precision ()) ? 'P' : '-'; 5935 f[3] = (underflow ()) ? 'U' : '-'; 5936 f[4] = (overflow ()) ? 'O' : '-'; 5937 f[5] = (zero_divide ()) ? 'Z' : '-'; 5938 f[6] = (denormalized()) ? 'D' : '-'; 5939 f[7] = (invalid ()) ? 'I' : '-'; 5940 f[8] = '\x0'; 5941 // output 5942 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top()); 5943 } 5944 5945 }; 5946 5947 class TagWord { 5948 public: 5949 int32_t _value; 5950 5951 int tag_at(int i) const { return (_value >> (i*2)) & 3; } 5952 5953 void print() const { 5954 printf("%04x", _value & 0xFFFF); 5955 } 5956 5957 }; 5958 5959 class FPU_Register { 5960 public: 5961 int32_t _m0; 5962 int32_t _m1; 5963 int16_t _ex; 5964 5965 bool is_indefinite() const { 5966 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0; 5967 } 5968 5969 void print() const { 5970 char sign = (_ex < 0) ? '-' : '+'; 5971 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " "; 5972 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind); 5973 }; 5974 5975 }; 5976 5977 class FPU_State { 5978 public: 5979 enum { 5980 register_size = 10, 5981 number_of_registers = 8, 5982 register_mask = 7 5983 }; 5984 5985 ControlWord _control_word; 5986 StatusWord _status_word; 5987 TagWord _tag_word; 5988 int32_t _error_offset; 5989 int32_t _error_selector; 5990 int32_t _data_offset; 5991 int32_t _data_selector; 5992 int8_t _register[register_size * number_of_registers]; 5993 5994 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); } 5995 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; } 5996 5997 const char* tag_as_string(int tag) const { 5998 switch (tag) { 5999 case 0: return "valid"; 6000 case 1: return "zero"; 6001 case 2: return "special"; 6002 case 3: return "empty"; 6003 } 6004 ShouldNotReachHere(); 6005 return nullptr; 6006 } 6007 6008 void print() const { 6009 // print computation registers 6010 { int t = _status_word.top(); 6011 for (int i = 0; i < number_of_registers; i++) { 6012 int j = (i - t) & register_mask; 6013 printf("%c r%d = ST%d = ", (j == 0 ? 
'*' : ' '), i, j);
6014 st(j)->print();
6015 printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
6016 }
6017 }
6018 printf("\n");
6019 // print control registers
6020 printf("ctrl = "); _control_word.print(); printf("\n");
6021 printf("stat = "); _status_word .print(); printf("\n");
6022 printf("tags = "); _tag_word .print(); printf("\n");
6023 }
6024
6025 };
6026
6027 class Flag_Register {
6028 public:
6029 int32_t _value;
6030
6031 bool overflow() const { return ((_value >> 11) & 1) != 0; }
6032 bool direction() const { return ((_value >> 10) & 1) != 0; }
6033 bool sign() const { return ((_value >> 7) & 1) != 0; }
6034 bool zero() const { return ((_value >> 6) & 1) != 0; }
6035 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
6036 bool parity() const { return ((_value >> 2) & 1) != 0; }
6037 bool carry() const { return ((_value >> 0) & 1) != 0; }
6038
6039 void print() const {
6040 // flags
6041 char f[8];
6042 f[0] = (overflow ()) ? 'O' : '-';
6043 f[1] = (direction ()) ? 'D' : '-';
6044 f[2] = (sign ()) ? 'S' : '-';
6045 f[3] = (zero ()) ? 'Z' : '-';
6046 f[4] = (auxiliary_carry()) ? 'A' : '-';
6047 f[5] = (parity ()) ? 'P' : '-';
6048 f[6] = (carry ()) ? 'C' : '-';
6049 f[7] = '\x0';
6050 // output
6051 printf("%08x flags = %s", _value, f);
6052 }
6053
6054 };
6055
6056 class IU_Register {
6057 public:
6058 int32_t _value;
6059
6060 void print() const {
6061 printf("%08x %11d", _value, _value);
6062 }
6063
6064 };
6065
6066 class IU_State {
6067 public:
6068 Flag_Register _eflags;
6069 IU_Register _rdi;
6070 IU_Register _rsi;
6071 IU_Register _rbp;
6072 IU_Register _rsp;
6073 IU_Register _rbx;
6074 IU_Register _rdx;
6075 IU_Register _rcx;
6076 IU_Register _rax;
6077
6078 void print() const {
6079 // computation registers
6080 printf("rax = "); _rax.print(); printf("\n");
6081 printf("rbx = "); _rbx.print(); printf("\n");
6082 printf("rcx = "); _rcx.print(); printf("\n");
6083 printf("rdx = "); _rdx.print(); printf("\n");
6084 printf("rdi = "); _rdi.print(); printf("\n");
6085 printf("rsi = "); _rsi.print(); printf("\n");
6086 printf("rbp = "); _rbp.print(); printf("\n");
6087 printf("rsp = "); _rsp.print(); printf("\n");
6088 printf("\n");
6089 // control registers
6090 printf("flgs = "); _eflags.print(); printf("\n");
6091 }
6092 };
6093
6094
6095 class CPU_State {
6096 public:
6097 FPU_State _fpu_state;
6098 IU_State _iu_state;
6099
6100 void print() const {
6101 printf("--------------------------------------------------\n");
6102 _iu_state .print();
6103 printf("\n");
6104 _fpu_state.print();
6105 printf("--------------------------------------------------\n");
6106 }
6107
6108 };
6109
6110
6111 static void _print_CPU_state(CPU_State* state) {
6112 state->print();
6113 }
6114
6115
6116 void MacroAssembler::print_CPU_state() {
6117 push_CPU_state();
6118 push(rsp); // pass CPU state
6119 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
6120 addptr(rsp, wordSize); // discard argument
6121 pop_CPU_state();
6122 }
6123
6124
6125 #ifndef _LP64
6126 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
6127 static int counter = 0;
6128 FPU_State* fs = &state->_fpu_state;
6129 counter++;
6130 // For leaf calls, only verify that the top few elements remain empty.
6131 // We only need 1 empty at the top for C2 code.
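// (A negative stack_depth means "at most -stack_depth elements may be in
//  use" and is fully handled by the early return below; a non-negative
//  stack_depth must match the computed depth exactly.)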
6132 if( stack_depth < 0 ) { 6133 if( fs->tag_for_st(7) != 3 ) { 6134 printf("FPR7 not empty\n"); 6135 state->print(); 6136 assert(false, "error"); 6137 return false; 6138 } 6139 return true; // All other stack states do not matter 6140 } 6141 6142 assert((fs->_control_word._value & 0xffff) == StubRoutines::x86::fpu_cntrl_wrd_std(), 6143 "bad FPU control word"); 6144 6145 // compute stack depth 6146 int i = 0; 6147 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++; 6148 int d = i; 6149 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++; 6150 // verify findings 6151 if (i != FPU_State::number_of_registers) { 6152 // stack not contiguous 6153 printf("%s: stack not contiguous at ST%d\n", s, i); 6154 state->print(); 6155 assert(false, "error"); 6156 return false; 6157 } 6158 // check if computed stack depth corresponds to expected stack depth 6159 if (stack_depth < 0) { 6160 // expected stack depth is -stack_depth or less 6161 if (d > -stack_depth) { 6162 // too many elements on the stack 6163 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d); 6164 state->print(); 6165 assert(false, "error"); 6166 return false; 6167 } 6168 } else { 6169 // expected stack depth is stack_depth 6170 if (d != stack_depth) { 6171 // wrong stack depth 6172 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d); 6173 state->print(); 6174 assert(false, "error"); 6175 return false; 6176 } 6177 } 6178 // everything is cool 6179 return true; 6180 } 6181 6182 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 6183 if (!VerifyFPU) return; 6184 push_CPU_state(); 6185 push(rsp); // pass CPU state 6186 ExternalAddress msg((address) s); 6187 // pass message string s 6188 pushptr(msg.addr(), noreg); 6189 push(stack_depth); // pass stack depth 6190 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU))); 6191 addptr(rsp, 3 * wordSize); // discard arguments 6192 // check for error 6193 { Label L; 6194 testl(rax, rax); 6195 jcc(Assembler::notZero, L); 6196 int3(); // break if error condition 6197 bind(L); 6198 } 6199 pop_CPU_state(); 6200 } 6201 #endif // _LP64 6202 6203 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) { 6204 // Either restore the MXCSR register after returning from the JNI Call 6205 // or verify that it wasn't changed (with -Xcheck:jni flag). 6206 if (VM_Version::supports_sse()) { 6207 if (RestoreMXCSROnJNICalls) { 6208 ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch); 6209 } else if (CheckJNICalls) { 6210 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry())); 6211 } 6212 } 6213 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty. 6214 vzeroupper(); 6215 6216 #ifndef _LP64 6217 // Either restore the x87 floating pointer control word after returning 6218 // from the JNI call or verify that it wasn't changed. 6219 if (CheckJNICalls) { 6220 call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry())); 6221 } 6222 #endif // _LP64 6223 } 6224 6225 // ((OopHandle)result).resolve(); 6226 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) { 6227 assert_different_registers(result, tmp); 6228 6229 // Only 64 bit platforms support GCs that require a tmp register 6230 // Only IN_HEAP loads require a thread_tmp register 6231 // OopHandle::resolve is an indirection like jobject. 
6232 access_load_at(T_OBJECT, IN_NATIVE, 6233 result, Address(result, 0), tmp, /*tmp_thread*/noreg); 6234 } 6235 6236 // ((WeakHandle)result).resolve(); 6237 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) { 6238 assert_different_registers(rresult, rtmp); 6239 Label resolved; 6240 6241 // A null weak handle resolves to null. 6242 cmpptr(rresult, 0); 6243 jcc(Assembler::equal, resolved); 6244 6245 // Only 64 bit platforms support GCs that require a tmp register 6246 // Only IN_HEAP loads require a thread_tmp register 6247 // WeakHandle::resolve is an indirection like jweak. 6248 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 6249 rresult, Address(rresult, 0), rtmp, /*tmp_thread*/noreg); 6250 bind(resolved); 6251 } 6252 6253 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) { 6254 // get mirror 6255 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 6256 load_method_holder(mirror, method); 6257 movptr(mirror, Address(mirror, mirror_offset)); 6258 resolve_oop_handle(mirror, tmp); 6259 } 6260 6261 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 6262 load_method_holder(rresult, rmethod); 6263 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 6264 } 6265 6266 void MacroAssembler::load_method_holder(Register holder, Register method) { 6267 movptr(holder, Address(method, Method::const_offset())); // ConstMethod* 6268 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 6269 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 6270 } 6271 6272 void MacroAssembler::load_metadata(Register dst, Register src) { 6273 if (UseCompressedClassPointers) { 6274 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 6275 } else { 6276 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 6277 } 6278 } 6279 6280 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { 6281 assert_different_registers(src, tmp); 6282 assert_different_registers(dst, tmp); 6283 #ifdef _LP64 6284 if (UseCompressedClassPointers) { 6285 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 6286 decode_klass_not_null(dst, tmp); 6287 } else 6288 #endif 6289 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 6290 } 6291 6292 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) { 6293 load_klass(dst, src, tmp); 6294 movptr(dst, Address(dst, Klass::prototype_header_offset())); 6295 } 6296 6297 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { 6298 assert_different_registers(src, tmp); 6299 assert_different_registers(dst, tmp); 6300 #ifdef _LP64 6301 if (UseCompressedClassPointers) { 6302 encode_klass_not_null(src, tmp); 6303 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); 6304 } else 6305 #endif 6306 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src); 6307 } 6308 6309 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, 6310 Register tmp1, Register thread_tmp) { 6311 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 6312 decorators = AccessInternal::decorator_fixup(decorators, type); 6313 bool as_raw = (decorators & AS_RAW) != 0; 6314 if (as_raw) { 6315 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp); 6316 } else { 6317 bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp); 6318 } 6319 } 
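// Illustrative use (hypothetical register/address names, not taken from
// this file): the decorator set chooses between the GC-barriered path and
// a raw load, e.g.
//   access_load_at(T_OBJECT, IN_HEAP, dst, field_addr, tmp1, thread_tmp);          // barriered
//   access_load_at(T_OBJECT, IN_HEAP | AS_RAW, dst, field_addr, tmp1, thread_tmp); // raw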
6320 6321 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, 6322 Register tmp1, Register tmp2, Register tmp3) { 6323 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 6324 decorators = AccessInternal::decorator_fixup(decorators, type); 6325 bool as_raw = (decorators & AS_RAW) != 0; 6326 if (as_raw) { 6327 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 6328 } else { 6329 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 6330 } 6331 } 6332 6333 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst, 6334 Register inline_layout_info) { 6335 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 6336 bs->flat_field_copy(this, decorators, src, dst, inline_layout_info); 6337 } 6338 6339 void MacroAssembler::first_field_offset(Register inline_klass, Register offset) { 6340 movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset())); 6341 movl(offset, Address(offset, InlineKlass::first_field_offset_offset())); 6342 } 6343 6344 void MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) { 6345 // ((address) (void*) o) + vk->first_field_offset(); 6346 Register offset = (data == oop) ? rscratch1 : data; 6347 first_field_offset(inline_klass, offset); 6348 if (data == oop) { 6349 addptr(data, offset); 6350 } else { 6351 lea(data, Address(oop, offset)); 6352 } 6353 } 6354 6355 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass, 6356 Register index, Register data) { 6357 assert(index != rcx, "index needs to shift by rcx"); 6358 assert_different_registers(array, array_klass, index); 6359 assert_different_registers(rcx, array, index); 6360 6361 // array->base() + (index << Klass::layout_helper_log2_element_size(lh)); 6362 movl(rcx, Address(array_klass, Klass::layout_helper_offset())); 6363 6364 // Klass::layout_helper_log2_element_size(lh) 6365 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask; 6366 shrl(rcx, Klass::_lh_log2_element_size_shift); 6367 andl(rcx, Klass::_lh_log2_element_size_mask); 6368 shlptr(index); // index << rcx 6369 6370 lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT))); 6371 } 6372 6373 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 6374 Register thread_tmp, DecoratorSet decorators) { 6375 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp); 6376 } 6377 6378 // Doesn't do verification, generates fixed size code 6379 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 6380 Register thread_tmp, DecoratorSet decorators) { 6381 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp); 6382 } 6383 6384 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 6385 Register tmp2, Register tmp3, DecoratorSet decorators) { 6386 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 6387 } 6388 6389 // Used for storing nulls. 
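// (Passing noreg for val tells the barrier assembler to emit a null store;
//  no temps are needed.)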
6390 void MacroAssembler::store_heap_oop_null(Address dst) { 6391 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 6392 } 6393 6394 #ifdef _LP64 6395 void MacroAssembler::store_klass_gap(Register dst, Register src) { 6396 if (UseCompressedClassPointers) { 6397 // Store to klass gap in destination 6398 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); 6399 } 6400 } 6401 6402 #ifdef ASSERT 6403 void MacroAssembler::verify_heapbase(const char* msg) { 6404 assert (UseCompressedOops, "should be compressed"); 6405 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6406 if (CheckCompressedOops) { 6407 Label ok; 6408 ExternalAddress src2(CompressedOops::base_addr()); 6409 const bool is_src2_reachable = reachable(src2); 6410 if (!is_src2_reachable) { 6411 push(rscratch1); // cmpptr trashes rscratch1 6412 } 6413 cmpptr(r12_heapbase, src2, rscratch1); 6414 jcc(Assembler::equal, ok); 6415 STOP(msg); 6416 bind(ok); 6417 if (!is_src2_reachable) { 6418 pop(rscratch1); 6419 } 6420 } 6421 } 6422 #endif 6423 6424 // Algorithm must match oop.inline.hpp encode_heap_oop. 6425 void MacroAssembler::encode_heap_oop(Register r) { 6426 #ifdef ASSERT 6427 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 6428 #endif 6429 verify_oop_msg(r, "broken oop in encode_heap_oop"); 6430 if (CompressedOops::base() == nullptr) { 6431 if (CompressedOops::shift() != 0) { 6432 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6433 shrq(r, LogMinObjAlignmentInBytes); 6434 } 6435 return; 6436 } 6437 testq(r, r); 6438 cmovq(Assembler::equal, r, r12_heapbase); 6439 subq(r, r12_heapbase); 6440 shrq(r, LogMinObjAlignmentInBytes); 6441 } 6442 6443 void MacroAssembler::encode_heap_oop_not_null(Register r) { 6444 #ifdef ASSERT 6445 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 6446 if (CheckCompressedOops) { 6447 Label ok; 6448 testq(r, r); 6449 jcc(Assembler::notEqual, ok); 6450 STOP("null oop passed to encode_heap_oop_not_null"); 6451 bind(ok); 6452 } 6453 #endif 6454 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 6455 if (CompressedOops::base() != nullptr) { 6456 subq(r, r12_heapbase); 6457 } 6458 if (CompressedOops::shift() != 0) { 6459 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6460 shrq(r, LogMinObjAlignmentInBytes); 6461 } 6462 } 6463 6464 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 6465 #ifdef ASSERT 6466 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 6467 if (CheckCompressedOops) { 6468 Label ok; 6469 testq(src, src); 6470 jcc(Assembler::notEqual, ok); 6471 STOP("null oop passed to encode_heap_oop_not_null2"); 6472 bind(ok); 6473 } 6474 #endif 6475 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 6476 if (dst != src) { 6477 movq(dst, src); 6478 } 6479 if (CompressedOops::base() != nullptr) { 6480 subq(dst, r12_heapbase); 6481 } 6482 if (CompressedOops::shift() != 0) { 6483 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6484 shrq(dst, LogMinObjAlignmentInBytes); 6485 } 6486 } 6487 6488 void MacroAssembler::decode_heap_oop(Register r) { 6489 #ifdef ASSERT 6490 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 6491 #endif 6492 if (CompressedOops::base() == nullptr) { 6493 if (CompressedOops::shift() != 0) { 6494 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 
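// With a null base, decoding is just undoing the shift.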
6495 shlq(r, LogMinObjAlignmentInBytes); 6496 } 6497 } else { 6498 Label done; 6499 shlq(r, LogMinObjAlignmentInBytes); 6500 jccb(Assembler::equal, done); 6501 addq(r, r12_heapbase); 6502 bind(done); 6503 } 6504 verify_oop_msg(r, "broken oop in decode_heap_oop"); 6505 } 6506 6507 void MacroAssembler::decode_heap_oop_not_null(Register r) { 6508 // Note: it will change flags 6509 assert (UseCompressedOops, "should only be used for compressed headers"); 6510 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6511 // Cannot assert, unverified entry point counts instructions (see .ad file) 6512 // vtableStubs also counts instructions in pd_code_size_limit. 6513 // Also do not verify_oop as this is called by verify_oop. 6514 if (CompressedOops::shift() != 0) { 6515 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6516 shlq(r, LogMinObjAlignmentInBytes); 6517 if (CompressedOops::base() != nullptr) { 6518 addq(r, r12_heapbase); 6519 } 6520 } else { 6521 assert (CompressedOops::base() == nullptr, "sanity"); 6522 } 6523 } 6524 6525 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 6526 // Note: it will change flags 6527 assert (UseCompressedOops, "should only be used for compressed headers"); 6528 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6529 // Cannot assert, unverified entry point counts instructions (see .ad file) 6530 // vtableStubs also counts instructions in pd_code_size_limit. 6531 // Also do not verify_oop as this is called by verify_oop. 6532 if (CompressedOops::shift() != 0) { 6533 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6534 if (LogMinObjAlignmentInBytes == Address::times_8) { 6535 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); 6536 } else { 6537 if (dst != src) { 6538 movq(dst, src); 6539 } 6540 shlq(dst, LogMinObjAlignmentInBytes); 6541 if (CompressedOops::base() != nullptr) { 6542 addq(dst, r12_heapbase); 6543 } 6544 } 6545 } else { 6546 assert (CompressedOops::base() == nullptr, "sanity"); 6547 if (dst != src) { 6548 movq(dst, src); 6549 } 6550 } 6551 } 6552 6553 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { 6554 assert_different_registers(r, tmp); 6555 if (CompressedKlassPointers::base() != nullptr) { 6556 mov64(tmp, (int64_t)CompressedKlassPointers::base()); 6557 subq(r, tmp); 6558 } 6559 if (CompressedKlassPointers::shift() != 0) { 6560 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 6561 shrq(r, LogKlassAlignmentInBytes); 6562 } 6563 } 6564 6565 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) { 6566 assert_different_registers(src, dst); 6567 if (CompressedKlassPointers::base() != nullptr) { 6568 mov64(dst, -(int64_t)CompressedKlassPointers::base()); 6569 addq(dst, src); 6570 } else { 6571 movptr(dst, src); 6572 } 6573 if (CompressedKlassPointers::shift() != 0) { 6574 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 6575 shrq(dst, LogKlassAlignmentInBytes); 6576 } 6577 } 6578 6579 void MacroAssembler::decode_klass_not_null(Register r, Register tmp) { 6580 assert_different_registers(r, tmp); 6581 // Note: it will change flags 6582 assert(UseCompressedClassPointers, "should only be used for compressed headers"); 6583 // Cannot assert, unverified entry point counts instructions (see .ad file) 6584 // vtableStubs also counts instructions in pd_code_size_limit. 

void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
  assert_different_registers(r, tmp);
  if (CompressedKlassPointers::base() != nullptr) {
    mov64(tmp, (int64_t)CompressedKlassPointers::base());
    subq(r, tmp);
  }
  if (CompressedKlassPointers::shift() != 0) {
    assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
    shrq(r, LogKlassAlignmentInBytes);
  }
}

void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
  assert_different_registers(src, dst);
  if (CompressedKlassPointers::base() != nullptr) {
    mov64(dst, -(int64_t)CompressedKlassPointers::base());
    addq(dst, src);
  } else {
    movptr(dst, src);
  }
  if (CompressedKlassPointers::shift() != 0) {
    assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
    shrq(dst, LogKlassAlignmentInBytes);
  }
}

void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
  assert_different_registers(r, tmp);
  // Note: it will change flags
  assert(UseCompressedClassPointers, "should only be used for compressed headers");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (CompressedKlassPointers::shift() != 0) {
    assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
    shlq(r, LogKlassAlignmentInBytes);
  }
  if (CompressedKlassPointers::base() != nullptr) {
    mov64(tmp, (int64_t)CompressedKlassPointers::base());
    addq(r, tmp);
  }
}

void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
  assert_different_registers(src, dst);
  // Note: it will change flags
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.

  if (CompressedKlassPointers::base() == nullptr &&
      CompressedKlassPointers::shift() == 0) {
    // The best case scenario is that there is no base or shift. Then it is already
    // a pointer that needs nothing but a register rename.
    movl(dst, src);
  } else {
    if (CompressedKlassPointers::base() != nullptr) {
      mov64(dst, (int64_t)CompressedKlassPointers::base());
    } else {
      xorq(dst, dst);
    }
    if (CompressedKlassPointers::shift() != 0) {
      assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
      assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
      leaq(dst, Address(dst, src, Address::times_8, 0));
    } else {
      addq(dst, src);
    }
  }
}
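
// Note on the decode_and_move variant above: once dst holds the klass base
// (or zero), "base + (narrow << 3)" folds into a single
// leaq(dst, Address(dst, src, Address::times_8, 0)). For example, with a base
// of 0x0000000100000000 and a narrow value of 0x2000, the decoded Klass* is
// 0x0000000100000000 + (0x2000 << 3) = 0x0000000100010000 (illustrative
// numbers only).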

void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_narrow_oop(dst, oop_index, rspec);
}

void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_narrow_oop(dst, oop_index, rspec);
}

void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
}

void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
}

void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  Assembler::cmp_narrow_oop(dst, oop_index, rspec);
}

void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  Assembler::cmp_narrow_oop(dst, oop_index, rspec);
}

void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
}

void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
}

void MacroAssembler::reinit_heapbase() {
  if (UseCompressedOops) {
    if (Universe::heap() != nullptr) {
      if (CompressedOops::base() == nullptr) {
        MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
      } else {
        mov64(r12_heapbase, (int64_t)CompressedOops::base());
      }
    } else {
      movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr()));
    }
  }
}

#endif // _LP64

#if COMPILER2_OR_JVMCI

// clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers
void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) {
  // cnt - number of qwords (8-byte words).
  // base - start address, qword aligned.
  Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
  bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
  if (use64byteVector) {
    evpbroadcastq(xtmp, val, AVX_512bit);
  } else if (MaxVectorSize >= 32) {
    movdq(xtmp, val);
    punpcklqdq(xtmp, xtmp);
    vinserti128_high(xtmp, xtmp);
  } else {
    movdq(xtmp, val);
    punpcklqdq(xtmp, xtmp);
  }
  jmp(L_zero_64_bytes);

  BIND(L_loop);
  if (MaxVectorSize >= 32) {
    fill64(base, 0, xtmp, use64byteVector);
  } else {
    movdqu(Address(base,  0), xtmp);
    movdqu(Address(base, 16), xtmp);
    movdqu(Address(base, 32), xtmp);
    movdqu(Address(base, 48), xtmp);
  }
  addptr(base, 64);

  BIND(L_zero_64_bytes);
  subptr(cnt, 8);
  jccb(Assembler::greaterEqual, L_loop);

  // Copy trailing 64 bytes
  if (use64byteVector) {
    addptr(cnt, 8);
    jccb(Assembler::equal, L_end);
    fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
    jmp(L_end);
  } else {
    addptr(cnt, 4);
    jccb(Assembler::less, L_tail);
    if (MaxVectorSize >= 32) {
      vmovdqu(Address(base, 0), xtmp);
    } else {
      movdqu(Address(base,  0), xtmp);
      movdqu(Address(base, 16), xtmp);
    }
  }
  addptr(base, 32);
  subptr(cnt, 4);

  BIND(L_tail);
  addptr(cnt, 4);
  jccb(Assembler::lessEqual, L_end);
  if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
    fill32_masked(3, base, 0, xtmp, mask, cnt, val);
  } else {
    decrement(cnt);

    BIND(L_sloop);
    movq(Address(base, 0), xtmp);
    addptr(base, 8);
    decrement(cnt);
    jccb(Assembler::greaterEqual, L_sloop);
  }
  BIND(L_end);
}
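
// Control-flow sketch for xmm_clear_mem above, for a hypothetical cnt of 19
// qwords (152 bytes): the main loop runs twice and clears qwords 0..15,
// leaving cnt = 3 after the final addptr. With 64-byte vectors the tail is a
// single masked fill64 of the last 3 qwords; otherwise the 32-byte branch is
// skipped (3 < 4) and the scalar L_sloop (or a masked fill32 on AVX512VL)
// stores the remaining 3 qwords one at a time.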

int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
  assert(InlineTypeReturnedAsFields, "inline types should be returned as fields");
  // An inline type might be returned. If fields are in registers we
  // need to allocate an inline type instance and initialize it with
  // the value of the fields.
  Label skip;
  // We only need a new buffered inline type if a new one is not returned
  testptr(rax, 1);
  jcc(Assembler::zero, skip);
  int call_offset = -1;

#ifdef _LP64
  // The following code is similar to allocate_instance but has some slight differences,
  // e.g. the object size is never zero and is sometimes constant, and storing the klass
  // ptr after allocating is not necessary if vk != nullptr. allocate_instance is not
  // aware of these.
  Label slow_case;
  // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
  mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation failed
  if (vk != nullptr) {
    // Called from C1, where the return type is statically known.
    movptr(rbx, (intptr_t)vk->get_InlineKlass());
    jint lh = vk->layout_helper();
    assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
    if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
      tlab_allocate(r15_thread, rax, noreg, lh, r13, r14, slow_case);
    } else {
      jmp(slow_case);
    }
  } else {
    // Called from the interpreter: RAX contains ((the InlineKlass* of the return type) | 0x01)
    mov(rbx, rax);
    andptr(rbx, -2);
    if (UseTLAB) {
      movl(r14, Address(rbx, Klass::layout_helper_offset()));
      testl(r14, Klass::_lh_instance_slow_path_bit);
      jcc(Assembler::notZero, slow_case);
      tlab_allocate(r15_thread, rax, r14, 0, r13, r14, slow_case);
    } else {
      jmp(slow_case);
    }
  }
  if (UseTLAB) {
    // 2. Initialize buffered inline instance header
    Register buffer_obj = rax;
    movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());
    xorl(r13, r13);
    store_klass_gap(buffer_obj, r13);
    if (vk == nullptr) {
      // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only).
      mov(r13, rbx);
    }
    store_klass(buffer_obj, rbx, rscratch1);
    // 3. Initialize its fields with an inline class specific handler
    if (vk != nullptr) {
      call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
    } else {
      movptr(rbx, Address(r13, InstanceKlass::adr_inlineklass_fixed_block_offset()));
      movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset()));
      call(rbx);
    }
    jmp(skip);
  }
  bind(slow_case);
  // We failed to allocate a new inline type, fall back to a runtime
  // call. Some oop field may be live in some registers but we can't
  // tell. That runtime call will take care of preserving them
  // across a GC if there's one.
  mov(rax, rscratch1);
#endif

  if (from_interpreter) {
    super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
  } else {
    call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
    call_offset = offset();
  }

  bind(skip);
  return call_offset;
}
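
// Calling-convention sketch for the buffering path above: on return from a
// method that scalarizes an inline type, rax either holds the oop of an
// already-buffered instance (low bit clear, so the testptr/jcc pair branches
// straight to 'skip'), or (InlineKlass* | 0x01) when only the field registers
// are live. In the latter case the tag bit is stripped with
// "andptr(rbx, -2)" before the klass is consulted for allocation.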

// Move a value between registers/stack slots and update the reg_state
bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
  assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
  if (reg_state[to->value()] == reg_written) {
    return true; // Already written
  }
  if (from != to && bt != T_VOID) {
    if (reg_state[to->value()] == reg_readonly) {
      return false; // Not yet writable
    }
    if (from->is_reg()) {
      if (to->is_reg()) {
        if (from->is_XMMRegister()) {
          if (bt == T_DOUBLE) {
            movdbl(to->as_XMMRegister(), from->as_XMMRegister());
          } else {
            assert(bt == T_FLOAT, "must be float");
            movflt(to->as_XMMRegister(), from->as_XMMRegister());
          }
        } else {
          movq(to->as_Register(), from->as_Register());
        }
      } else {
        int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        Address to_addr = Address(rsp, st_off);
        if (from->is_XMMRegister()) {
          if (bt == T_DOUBLE) {
            movdbl(to_addr, from->as_XMMRegister());
          } else {
            assert(bt == T_FLOAT, "must be float");
            movflt(to_addr, from->as_XMMRegister());
          }
        } else {
          movq(to_addr, from->as_Register());
        }
      }
    } else {
      Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
      if (to->is_reg()) {
        if (to->is_XMMRegister()) {
          if (bt == T_DOUBLE) {
            movdbl(to->as_XMMRegister(), from_addr);
          } else {
            assert(bt == T_FLOAT, "must be float");
            movflt(to->as_XMMRegister(), from_addr);
          }
        } else {
          movq(to->as_Register(), from_addr);
        }
      } else {
        int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        movq(r13, from_addr);
        movq(Address(rsp, st_off), r13);
      }
    }
  }
  // Update register states
  reg_state[from->value()] = reg_writable;
  reg_state[to->value()] = reg_written;
  return true;
}

// Calculate the extra stack space required for packing or unpacking inline
// args and adjust the stack pointer
int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
  // Two additional slots to account for the return address
  int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
  sp_inc = align_up(sp_inc, StackAlignmentInBytes);
  // Save the return address, adjust the stack (make sure it is properly
  // 16-byte aligned) and copy the return address to the new top of the stack.
  // The stack will be repaired on return (see MacroAssembler::remove_frame).
  assert(sp_inc > 0, "sanity");
  pop(r13);
  subptr(rsp, sp_inc);
  push(r13);
  return sp_inc;
}
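
// Worked example for extend_stack_for_inline_args above (illustrative,
// assuming the usual 4-byte VMReg stack slots and 16-byte stack alignment):
// args_on_stack = 5 gives sp_inc = (5 + 2) * 4 = 28, rounded up to 32; the
// return address is popped, rsp is dropped by 32, and the return address is
// pushed back on the new top of stack.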

// Read all fields from an inline type buffer and store the field values in registers/stack slots.
bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
                                          VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
                                          RegState reg_state[]) {
  assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
  assert(from->is_valid(), "source must be valid");
  bool progress = false;
#ifdef ASSERT
  const int start_offset = offset();
#endif

  Label L_null, L_notNull;
  // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
  Register tmp1 = r10;
  Register tmp2 = r13;
  Register fromReg = noreg;
  ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
  bool done = true;
  bool mark_done = true;
  VMReg toReg;
  BasicType bt;
  // Check if argument requires a null check
  bool null_check = false;
  VMReg nullCheckReg;
  while (stream.next(nullCheckReg, bt)) {
    if (sig->at(stream.sig_index())._offset == -1) {
      null_check = true;
      break;
    }
  }
  stream.reset(sig_index, to_index);
  while (stream.next(toReg, bt)) {
    assert(toReg->is_valid(), "destination must be valid");
    int idx = (int)toReg->value();
    if (reg_state[idx] == reg_readonly) {
      if (idx != from->value()) {
        mark_done = false;
      }
      done = false;
      continue;
    } else if (reg_state[idx] == reg_written) {
      continue;
    }
    assert(reg_state[idx] == reg_writable, "must be writable");
    reg_state[idx] = reg_written;
    progress = true;

    if (fromReg == noreg) {
      if (from->is_reg()) {
        fromReg = from->as_Register();
      } else {
        int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        movq(tmp1, Address(rsp, st_off));
        fromReg = tmp1;
      }
      if (null_check) {
        // Nullable inline type argument, emit null check
        testptr(fromReg, fromReg);
        jcc(Assembler::zero, L_null);
      }
    }
    int off = sig->at(stream.sig_index())._offset;
    if (off == -1) {
      assert(null_check, "missing null check");
      if (toReg->is_stack()) {
        int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        movq(Address(rsp, st_off), 1);
      } else {
        movq(toReg->as_Register(), 1);
      }
      continue;
    }
    assert(off > 0, "offset in object should be positive");
    Address fromAddr = Address(fromReg, off);
    if (!toReg->is_XMMRegister()) {
      Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
      if (is_reference_type(bt)) {
        load_heap_oop(dst, fromAddr);
      } else {
        bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
        load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
      }
      if (toReg->is_stack()) {
        int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        movq(Address(rsp, st_off), dst);
      }
    } else if (bt == T_DOUBLE) {
      movdbl(toReg->as_XMMRegister(), fromAddr);
    } else {
      assert(bt == T_FLOAT, "must be float");
      movflt(toReg->as_XMMRegister(), fromAddr);
    }
  }
  if (progress && null_check) {
    if (done) {
      jmp(L_notNull);
      bind(L_null);
      // Set IsInit field to zero to signal that the argument is null.
      // Also set all oop fields to zero to make the GC happy.
      stream.reset(sig_index, to_index);
      while (stream.next(toReg, bt)) {
        if (sig->at(stream.sig_index())._offset == -1 ||
            bt == T_OBJECT || bt == T_ARRAY) {
          if (toReg->is_stack()) {
            int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
            movq(Address(rsp, st_off), 0);
          } else {
            xorq(toReg->as_Register(), toReg->as_Register());
          }
        }
      }
      bind(L_notNull);
    } else {
      bind(L_null);
    }
  }

  sig_index = stream.sig_index();
  to_index = stream.regs_index();

  if (mark_done && reg_state[from->value()] != reg_written) {
    // This is okay because no one else will write to that slot
    reg_state[from->value()] = reg_writable;
  }
  from_index--;
  assert(progress || (start_offset == offset()), "should not emit code");
  return done;
}
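
// Null-channel sketch for the unpacking above: a nullable scalarized argument
// carries one extra IsInit slot (signature offset -1) that is set to 1 when
// the buffer is non-null; on the L_null path that slot and every oop-typed
// destination are zeroed instead, so callees see a clear null marker and the
// GC never scans a stale oop value.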

bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
                                        VMRegPair* from, int from_count, int& from_index, VMReg to,
                                        RegState reg_state[], Register val_array) {
  assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
  assert(to->is_valid(), "destination must be valid");

  if (reg_state[to->value()] == reg_written) {
    skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
    return true; // Already written
  }

  // TODO 8284443 Isn't it an issue if below code uses r14 as tmp when it contains a spilled value?
  // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
  Register val_obj_tmp = r11;
  Register from_reg_tmp = r14;
  Register tmp1 = r10;
  Register tmp2 = r13;
  Register tmp3 = rbx;
  Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();

  assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);

  if (reg_state[to->value()] == reg_readonly) {
    if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
      skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
      return false; // Not yet writable
    }
    val_obj = val_obj_tmp;
  }

  int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
  load_heap_oop(val_obj, Address(val_array, index));

  ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
  VMReg fromReg;
  BasicType bt;
  Label L_null;
  while (stream.next(fromReg, bt)) {
    assert(fromReg->is_valid(), "source must be valid");
    reg_state[fromReg->value()] = reg_writable;

    int off = sig->at(stream.sig_index())._offset;
    if (off == -1) {
      // Nullable inline type argument, emit null check
      Label L_notNull;
      if (fromReg->is_stack()) {
        int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        testb(Address(rsp, ld_off), 1);
      } else {
        testb(fromReg->as_Register(), 1);
      }
      jcc(Assembler::notZero, L_notNull);
      movptr(val_obj, 0);
      jmp(L_null);
      bind(L_notNull);
      continue;
    }

    assert(off > 0, "offset in object should be positive");
    size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;

    Address dst(val_obj, off);
    if (!fromReg->is_XMMRegister()) {
      Register src;
      if (fromReg->is_stack()) {
        src = from_reg_tmp;
        int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
      } else {
        src = fromReg->as_Register();
      }
      assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
      if (is_reference_type(bt)) {
        store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
      } else {
        store_sized_value(dst, src, size_in_bytes);
      }
    } else if (bt == T_DOUBLE) {
      movdbl(dst, fromReg->as_XMMRegister());
    } else {
      assert(bt == T_FLOAT, "must be float");
      movflt(dst, fromReg->as_XMMRegister());
    }
  }
  bind(L_null);
  sig_index = stream.sig_index();
  from_index = stream.regs_index();

  assert(reg_state[to->value()] == reg_writable, "must have already been read");
  bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
  assert(success, "to register must be writeable");
  return true;
}

VMReg MacroAssembler::spill_reg_for(VMReg reg) {
  return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg();
}

void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
  assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  if (needs_stack_repair) {
    movq(rbp, Address(rsp, initial_framesize));
    // The stack increment resides just below the saved rbp
    addq(rsp, Address(rsp, initial_framesize - wordSize));
  } else {
    if (initial_framesize > 0) {
      addq(rsp, initial_framesize);
    }
    pop(rbp);
  }
}
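
// Frame-repair sketch for remove_frame above when needs_stack_repair is set:
// the callee saved its actual (possibly extended) frame size one word below
// the saved rbp, i.e. at [rsp + initial_framesize - wordSize]. Restoring rbp
// from [rsp + initial_framesize] and then adding that saved increment to rsp
// unwinds a frame whose real size was only known at run time.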

// Clearing constant sized memory using YMM/ZMM registers.
void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
  assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
  bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);

  int vector64_count = (cnt & (~0x7)) >> 3;
  cnt = cnt & 0x7;
  const int fill64_per_loop = 4;
  const int max_unrolled_fill64 = 8;

  // 64 byte initialization loop.
  vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit);
  int start64 = 0;
  if (vector64_count > max_unrolled_fill64) {
    Label LOOP;
    Register index = rtmp;

    start64 = vector64_count - (vector64_count % fill64_per_loop);

    movl(index, 0);
    BIND(LOOP);
    for (int i = 0; i < fill64_per_loop; i++) {
      fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector);
    }
    addl(index, fill64_per_loop * 64);
    cmpl(index, start64 * 64);
    jccb(Assembler::less, LOOP);
  }
  for (int i = start64; i < vector64_count; i++) {
    fill64(base, i * 64, xtmp, use64byteVector);
  }

  // Clear remaining 64 byte tail.
  int disp = vector64_count * 64;
  if (cnt) {
    switch (cnt) {
      case 1:
        movq(Address(base, disp), xtmp);
        break;
      case 2:
        evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit);
        break;
      case 3:
        movl(rtmp, 0x7);
        kmovwl(mask, rtmp);
        evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit);
        break;
      case 4:
        evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
        break;
      case 5:
        if (use64byteVector) {
          movl(rtmp, 0x1F);
          kmovwl(mask, rtmp);
          evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
        } else {
          evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
          movq(Address(base, disp + 32), xtmp);
        }
        break;
      case 6:
        if (use64byteVector) {
          movl(rtmp, 0x3F);
          kmovwl(mask, rtmp);
          evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
        } else {
          evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
          evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit);
        }
        break;
      case 7:
        if (use64byteVector) {
          movl(rtmp, 0x7F);
          kmovwl(mask, rtmp);
          evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit);
        } else {
          evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit);
          movl(rtmp, 0x7);
          kmovwl(mask, rtmp);
          evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit);
        }
        break;
      default:
        fatal("Unexpected length: %d\n", cnt);
        break;
    }
  }
}
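
// Tail-masking example for the constant-size clear above (illustrative
// numbers): cnt = 13 qwords gives vector64_count = 1 (one full 64-byte store)
// and a remainder of 5; with 64-byte vectors the tail becomes kmask 0x1F
// (five qword lanes) feeding one masked evmovdqu, and without them it is a
// 32-byte store plus a single movq.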

void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp,
                               bool is_large, bool word_copy_only, KRegister mask) {
  // cnt      - number of qwords (8-byte words).
  // base     - start address, qword aligned.
  // is_large - set if the optimizer knows cnt is larger than InitArrayShortSize
  assert(base==rdi, "base register must be edi for rep stos");
  assert(val==rax,  "val register must be eax for rep stos");
  assert(cnt==rcx,  "cnt register must be ecx for rep stos");
  assert(InitArrayShortSize % BytesPerLong == 0,
         "InitArrayShortSize should be the multiple of BytesPerLong");

  Label DONE;

  if (!is_large) {
    Label LOOP, LONG;
    cmpptr(cnt, InitArrayShortSize/BytesPerLong);
    jccb(Assembler::greater, LONG);

    NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM

    decrement(cnt);
    jccb(Assembler::negative, DONE); // Zero length

    // Use individual pointer-sized stores for small counts:
    BIND(LOOP);
    movptr(Address(base, cnt, Address::times_ptr), val);
    decrement(cnt);
    jccb(Assembler::greaterEqual, LOOP);
    jmpb(DONE);

    BIND(LONG);
  }

  // Use longer rep-prefixed ops for non-small counts:
  if (UseFastStosb && !word_copy_only) {
    shlptr(cnt, 3); // convert to number of bytes
    rep_stosb();
  } else if (UseXMMForObjInit) {
    xmm_clear_mem(base, cnt, val, xtmp, mask);
  } else {
    NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
    rep_stos();
  }

  BIND(DONE);
}

#endif //COMPILER2_OR_JVMCI

void MacroAssembler::generate_fill(BasicType t, bool aligned,
                                   Register to, Register value, Register count,
                                   Register rtmp, XMMRegister xtmp) {
  ShortBranchVerifier sbv(this);
  assert_different_registers(to, value, count, rtmp);
  Label L_exit;
  Label L_fill_2_bytes, L_fill_4_bytes;

#if defined(COMPILER2) && defined(_LP64)
  if (MaxVectorSize >= 32 &&
      VM_Version::supports_avx512vlbw() &&
      VM_Version::supports_bmi2()) {
    generate_fill_avx3(t, to, value, count, rtmp, xtmp);
    return;
  }
#endif

  int shift = -1;
  switch (t) {
    case T_BYTE:
      shift = 2;
      break;
    case T_SHORT:
      shift = 1;
      break;
    case T_INT:
      shift = 0;
      break;
    default: ShouldNotReachHere();
  }

  if (t == T_BYTE) {
    andl(value, 0xff);
    movl(rtmp, value);
    shll(rtmp, 8);
    orl(value, rtmp);
  }
  if (t == T_SHORT) {
    andl(value, 0xffff);
  }
  if (t == T_BYTE || t == T_SHORT) {
    movl(rtmp, value);
    shll(rtmp, 16);
    orl(value, rtmp);
  }

  cmpptr(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
  jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
  if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
    Label L_skip_align2;
    // align source address at 4 bytes address boundary
    if (t == T_BYTE) {
      Label L_skip_align1;
      // One byte misalignment happens only for byte arrays
      testptr(to, 1);
      jccb(Assembler::zero, L_skip_align1);
      movb(Address(to, 0), value);
      increment(to);
      decrement(count);
      BIND(L_skip_align1);
    }
    // Two bytes misalignment happens only for byte and short (char) arrays
    testptr(to, 2);
    jccb(Assembler::zero, L_skip_align2);
    movw(Address(to, 0), value);
    addptr(to, 2);
    subptr(count, 1<<(shift-1));
    BIND(L_skip_align2);
  }
  if (UseSSE < 2) {
    Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
    // Fill 32-byte chunks
    subptr(count, 8 << shift);
    jcc(Assembler::less, L_check_fill_8_bytes);
    align(16);

    BIND(L_fill_32_bytes_loop);

    for (int i = 0; i < 32; i += 4) {
      movl(Address(to, i), value);
    }

    addptr(to, 32);
    subptr(count, 8 << shift);
    jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
    BIND(L_check_fill_8_bytes);
    addptr(count, 8 << shift);
    jccb(Assembler::zero, L_exit);
    jmpb(L_fill_8_bytes);

    //
    // length is too short, just fill qwords
    //
    BIND(L_fill_8_bytes_loop);
    movl(Address(to, 0), value);
    movl(Address(to, 4), value);
    addptr(to, 8);
    BIND(L_fill_8_bytes);
    subptr(count, 1 << (shift + 1));
    jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
    // fall through to fill 4 bytes
  } else {
    Label L_fill_32_bytes;
    if (!UseUnalignedLoadStores) {
      // align to 8 bytes, we know we are 4 byte aligned to start
      testptr(to, 4);
      jccb(Assembler::zero, L_fill_32_bytes);
      movl(Address(to, 0), value);
      addptr(to, 4);
      subptr(count, 1<<shift);
    }
    BIND(L_fill_32_bytes);
    {
      assert(UseSSE >= 2, "supported cpu only");
      Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
      movdl(xtmp, value);
      if (UseAVX >= 2 && UseUnalignedLoadStores) {
        Label L_check_fill_32_bytes;
        if (UseAVX > 2) {
          // Fill 64-byte chunks
          Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;

          // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2
          cmpptr(count, VM_Version::avx3_threshold());
          jccb(Assembler::below, L_check_fill_64_bytes_avx2);

          vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);

          subptr(count, 16 << shift);
          jccb(Assembler::less, L_check_fill_32_bytes);
          align(16);

          BIND(L_fill_64_bytes_loop_avx3);
          evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
          addptr(to, 64);
          subptr(count, 16 << shift);
          jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
          jmpb(L_check_fill_32_bytes);

          BIND(L_check_fill_64_bytes_avx2);
        }
        // Fill 64-byte chunks
        Label L_fill_64_bytes_loop;
        vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);

        subptr(count, 16 << shift);
        jcc(Assembler::less, L_check_fill_32_bytes);
        align(16);

        BIND(L_fill_64_bytes_loop);
        vmovdqu(Address(to, 0), xtmp);
        vmovdqu(Address(to, 32), xtmp);
        addptr(to, 64);
        subptr(count, 16 << shift);
        jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);

        BIND(L_check_fill_32_bytes);
        addptr(count, 8 << shift);
        jccb(Assembler::less, L_check_fill_8_bytes);
        vmovdqu(Address(to, 0), xtmp);
        addptr(to, 32);
        subptr(count, 8 << shift);

        BIND(L_check_fill_8_bytes);
        // clean upper bits of YMM registers
        movdl(xtmp, value);
        pshufd(xtmp, xtmp, 0);
      } else {
        // Fill 32-byte chunks
        pshufd(xtmp, xtmp, 0);

        subptr(count, 8 << shift);
        jcc(Assembler::less, L_check_fill_8_bytes);
        align(16);

        BIND(L_fill_32_bytes_loop);

        if (UseUnalignedLoadStores) {
          movdqu(Address(to, 0), xtmp);
          movdqu(Address(to, 16), xtmp);
        } else {
          movq(Address(to, 0), xtmp);
          movq(Address(to, 8), xtmp);
          movq(Address(to, 16), xtmp);
          movq(Address(to, 24), xtmp);
        }

        addptr(to, 32);
        subptr(count, 8 << shift);
        jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);

        BIND(L_check_fill_8_bytes);
      }
      addptr(count, 8 << shift);
      jccb(Assembler::zero, L_exit);
      jmpb(L_fill_8_bytes);

      //
      // length is too short, just fill qwords
      //
      BIND(L_fill_8_bytes_loop);
      movq(Address(to, 0), xtmp);
      addptr(to, 8);
      BIND(L_fill_8_bytes);
      subptr(count, 1 << (shift + 1));
      jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
    }
  }
  // fill trailing 4 bytes
  BIND(L_fill_4_bytes);
  testl(count, 1<<shift);
  jccb(Assembler::zero, L_fill_2_bytes);
  movl(Address(to, 0), value);
  if (t == T_BYTE || t == T_SHORT) {
    Label L_fill_byte;
    addptr(to, 4);
    BIND(L_fill_2_bytes);
    // fill trailing 2 bytes
    testl(count, 1<<(shift-1));
    jccb(Assembler::zero, L_fill_byte);
    movw(Address(to, 0), value);
    if (t == T_BYTE) {
      addptr(to, 2);
      BIND(L_fill_byte);
      // fill trailing byte
      testl(count, 1);
      jccb(Assembler::zero, L_exit);
      movb(Address(to, 0), value);
    } else {
      BIND(L_fill_byte);
    }
  } else {
    BIND(L_fill_2_bytes);
  }
  BIND(L_exit);
}
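
// Value-replication sketch for generate_fill above (illustrative value): for
// T_BYTE a value of 0xAB is widened to 0xABAB by the 8-bit shift/or and then
// to 0xABABABAB by the 16-bit shift/or, so every 32-bit store (or vector lane
// after movdl and broadcast) writes four copies of the byte; T_SHORT needs
// only the 16-bit step and T_INT is stored as-is.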

void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) {
  switch (type) {
    case T_BYTE:
    case T_BOOLEAN:
      evpbroadcastb(dst, src, vector_len);
      break;
    case T_SHORT:
    case T_CHAR:
      evpbroadcastw(dst, src, vector_len);
      break;
    case T_INT:
    case T_FLOAT:
      evpbroadcastd(dst, src, vector_len);
      break;
    case T_LONG:
    case T_DOUBLE:
      evpbroadcastq(dst, src, vector_len);
      break;
    default:
      fatal("Unhandled type: %s", type2name(type));
      break;
  }
}

// encode char[] to byte[] in ISO_8859_1 or ASCII
//   @IntrinsicCandidate
//   private static int implEncodeISOArray(byte[] sa, int sp,
//                                         byte[] da, int dp, int len) {
//     int i = 0;
//     for (; i < len; i++) {
//       char c = StringUTF16.getChar(sa, sp++);
//       if (c > '\u00FF')
//         break;
//       da[dp++] = (byte)c;
//     }
//     return i;
//   }
//
//   @IntrinsicCandidate
//   private static int implEncodeAsciiArray(char[] sa, int sp,
//                                           byte[] da, int dp, int len) {
//     int i = 0;
//     for (; i < len; i++) {
//       char c = sa[sp++];
//       if (c >= '\u0080')
//         break;
//       da[dp++] = (byte)c;
//     }
//     return i;
//   }
void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
                                      XMMRegister tmp1Reg, XMMRegister tmp2Reg,
                                      XMMRegister tmp3Reg, XMMRegister tmp4Reg,
                                      Register tmp5, Register result, bool ascii) {

  // rsi: src
  // rdi: dst
  // rdx: len
  // rcx: tmp5
  // rax: result
  ShortBranchVerifier sbv(this);
  assert_different_registers(src, dst, len, tmp5, result);
  Label L_done, L_copy_1_char, L_copy_1_char_exit;

  int mask = ascii ? 0xff80ff80 : 0xff00ff00;
  int short_mask = ascii ? 0xff80 : 0xff00;

  // set result
  xorl(result, result);
  // check for zero length
  testl(len, len);
  jcc(Assembler::zero, L_done);

  movl(result, len);

  // Setup pointers
  lea(src, Address(src, len, Address::times_2)); // char[]
  lea(dst, Address(dst, len, Address::times_1)); // byte[]
  negptr(len);

  if (UseSSE42Intrinsics || UseAVX >= 2) {
    Label L_copy_8_chars, L_copy_8_chars_exit;
    Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;

    if (UseAVX >= 2) {
      Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
      movl(tmp5, mask);   // create mask to test for Unicode or non-ASCII chars in vector
      movdl(tmp1Reg, tmp5);
      vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit);
      jmp(L_chars_32_check);

      bind(L_copy_32_chars);
      vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
      vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
      vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
      vptest(tmp2Reg, tmp1Reg);       // check for Unicode or non-ASCII chars in vector
      jccb(Assembler::notZero, L_copy_32_chars_exit);
      vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
      vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
      vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);

      bind(L_chars_32_check);
      addptr(len, 32);
      jcc(Assembler::lessEqual, L_copy_32_chars);

      bind(L_copy_32_chars_exit);
      subptr(len, 16);
      jccb(Assembler::greater, L_copy_16_chars_exit);

    } else if (UseSSE42Intrinsics) {
      movl(tmp5, mask);   // create mask to test for Unicode or non-ASCII chars in vector
      movdl(tmp1Reg, tmp5);
      pshufd(tmp1Reg, tmp1Reg, 0);
      jmpb(L_chars_16_check);
    }

    bind(L_copy_16_chars);
    if (UseAVX >= 2) {
      vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
      vptest(tmp2Reg, tmp1Reg);
      jcc(Assembler::notZero, L_copy_16_chars_exit);
      vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
      vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
    } else {
      if (UseAVX > 0) {
        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
        vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
      } else {
        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
        por(tmp2Reg, tmp3Reg);
        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
        por(tmp2Reg, tmp4Reg);
      }
      ptest(tmp2Reg, tmp1Reg);       // check for Unicode or non-ASCII chars in vector
      jccb(Assembler::notZero, L_copy_16_chars_exit);
      packuswb(tmp3Reg, tmp4Reg);
    }
    movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);

    bind(L_chars_16_check);
    addptr(len, 16);
    jcc(Assembler::lessEqual, L_copy_16_chars);

    bind(L_copy_16_chars_exit);
    if (UseAVX >= 2) {
      // clean upper bits of YMM registers
      vpxor(tmp2Reg, tmp2Reg);
      vpxor(tmp3Reg, tmp3Reg);
      vpxor(tmp4Reg, tmp4Reg);
      movdl(tmp1Reg, tmp5);
      pshufd(tmp1Reg, tmp1Reg, 0);
    }
    subptr(len, 8);
    jccb(Assembler::greater, L_copy_8_chars_exit);

    bind(L_copy_8_chars);
    movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
    ptest(tmp3Reg, tmp1Reg);
    jccb(Assembler::notZero, L_copy_8_chars_exit);
    packuswb(tmp3Reg, tmp1Reg);
    movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
    addptr(len, 8);
    jccb(Assembler::lessEqual, L_copy_8_chars);

    bind(L_copy_8_chars_exit);
    subptr(len, 8);
    jccb(Assembler::zero, L_done);
  }

  bind(L_copy_1_char);
  load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
  testl(tmp5, short_mask);      // check if Unicode or non-ASCII char
  jccb(Assembler::notZero, L_copy_1_char_exit);
  movb(Address(dst, len, Address::times_1, 0), tmp5);
  addptr(len, 1);
  jccb(Assembler::less, L_copy_1_char);

  bind(L_copy_1_char_exit);
  addptr(result, len); // len is negative count of not processed elements

  bind(L_done);
}
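
// Mask sketch for encode_iso_array above: each vector holds packed 16-bit
// chars, and the broadcast test mask is 0xff00ff00 for ISO-8859-1 (flags any
// char > 0xFF) or 0xff80ff80 for ASCII (also flags 0x80..0xFF). A nonzero
// ptest/vptest result aborts the vector loop, and the scalar loop then stops
// at the offending char, so result is the number of chars actually encoded.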

#ifdef _LP64
/**
 * Helper for multiply_to_len().
 */
void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
  addq(dest_lo, src1);
  adcq(dest_hi, 0);
  addq(dest_lo, src2);
  adcq(dest_hi, 0);
}

/**
 * Multiply 64 bit by 64 bit first loop.
 */
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                                           Register y, Register y_idx, Register z,
                                           Register carry, Register product,
                                           Register idx, Register kdx) {
  //
  //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    huge_128 product = y[idx] * x[xstart] + carry;
  //    z[kdx] = (jlong)product;
  //    carry  = (jlong)(product >>> 64);
  //  }
  //  z[xstart] = carry;
  //

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  decrementl(xstart);
  jcc(Assembler::negative, L_one_x);

  movq(x_xstart, Address(x, xstart, Address::times_4, 0));
  rorq(x_xstart, 32); // convert big-endian to little-endian

  bind(L_first_loop);
  decrementl(idx);
  jcc(Assembler::negative, L_first_loop_exit);
  decrementl(idx);
  jcc(Assembler::negative, L_one_y);
  movq(y_idx, Address(y, idx, Address::times_4, 0));
  rorq(y_idx, 32); // convert big-endian to little-endian
  bind(L_multiply);
  movq(product, x_xstart);
  mulq(y_idx); // product(rax) * y_idx -> rdx:rax
  addq(product, carry);
  adcq(rdx, 0);
  subl(kdx, 2);
  movl(Address(z, kdx, Address::times_4, 4), product);
  shrq(product, 32);
  movl(Address(z, kdx, Address::times_4, 0), product);
  movq(carry, rdx);
  jmp(L_first_loop);

  bind(L_one_y);
  movl(y_idx, Address(y, 0));
  jmp(L_multiply);

  bind(L_one_x);
  movl(x_xstart, Address(x, 0));
  jmp(L_first_loop);

  bind(L_first_loop_exit);
}

/**
 * Multiply 64 bit by 64 bit and add 128 bit.
 */
void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                                            Register yz_idx, Register idx,
                                            Register carry, Register product, int offset) {
  //     huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
  //     z[kdx] = (jlong)product;

  movq(yz_idx, Address(y, idx, Address::times_4, offset));
  rorq(yz_idx, 32); // convert big-endian to little-endian
  movq(product, x_xstart);
  mulq(yz_idx);     // product(rax) * yz_idx -> rdx:product(rax)
  movq(yz_idx, Address(z, idx, Address::times_4, offset));
  rorq(yz_idx, 32); // convert big-endian to little-endian

  add2_with_carry(rdx, product, carry, yz_idx);

  movl(Address(z, idx, Address::times_4, offset+4), product);
  shrq(product, 32);
  movl(Address(z, idx, Address::times_4, offset), product);
}

/**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
 */
void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                                             Register yz_idx, Register idx, Register jdx,
                                             Register carry, Register product,
                                             Register carry2) {
  //   jlong carry, x[], y[], z[];
  //   int kdx = ystart+1;
  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //     huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
  //     z[kdx+idx+1] = (jlong)product;
  //     jlong carry2  = (jlong)(product >>> 64);
  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
  //     z[kdx+idx] = (jlong)product;
  //     carry  = (jlong)(product >>> 64);
  //   }
  //   idx += 2;
  //   if (idx > 0) {
  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
  //     z[kdx+idx] = (jlong)product;
  //     carry  = (jlong)(product >>> 64);
  //   }
  //

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  movl(jdx, idx);
  andl(jdx, 0xFFFFFFFC);
  shrl(jdx, 2);

  bind(L_third_loop);
  subl(jdx, 1);
  jcc(Assembler::negative, L_third_loop_exit);
  subl(idx, 4);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
  movq(carry2, rdx);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
  movq(carry, rdx);
  jmp(L_third_loop);

  bind(L_third_loop_exit);

  andl(idx, 0x3);
  jcc(Assembler::zero, L_post_third_loop_done);

  Label L_check_1;
  subl(idx, 2);
  jcc(Assembler::negative, L_check_1);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
  movq(carry, rdx);

  bind(L_check_1);
  addl(idx, 0x2);
  andl(idx, 0x1);
  subl(idx, 1);
  jcc(Assembler::negative, L_post_third_loop_done);

  movl(yz_idx, Address(y, idx, Address::times_4, 0));
  movq(product, x_xstart);
  mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
  movl(yz_idx, Address(z, idx, Address::times_4, 0));

  add2_with_carry(rdx, product, yz_idx, carry);

  movl(Address(z, idx, Address::times_4, 0), product);
  shrq(product, 32);

  shlq(rdx, 32);
  orq(product, rdx);
  movq(carry, product);

  bind(L_post_third_loop_done);
}
7911 * 7912 */ 7913 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z, 7914 Register carry, Register carry2, 7915 Register idx, Register jdx, 7916 Register yz_idx1, Register yz_idx2, 7917 Register tmp, Register tmp3, Register tmp4) { 7918 assert(UseBMI2Instructions, "should be used only when BMI2 is available"); 7919 7920 // jlong carry, x[], y[], z[]; 7921 // int kdx = ystart+1; 7922 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 7923 // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry; 7924 // jlong carry2 = (jlong)(tmp3 >>> 64); 7925 // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2; 7926 // carry = (jlong)(tmp4 >>> 64); 7927 // z[kdx+idx+1] = (jlong)tmp3; 7928 // z[kdx+idx] = (jlong)tmp4; 7929 // } 7930 // idx += 2; 7931 // if (idx > 0) { 7932 // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry; 7933 // z[kdx+idx] = (jlong)yz_idx1; 7934 // carry = (jlong)(yz_idx1 >>> 64); 7935 // } 7936 // 7937 7938 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 7939 7940 movl(jdx, idx); 7941 andl(jdx, 0xFFFFFFFC); 7942 shrl(jdx, 2); 7943 7944 bind(L_third_loop); 7945 subl(jdx, 1); 7946 jcc(Assembler::negative, L_third_loop_exit); 7947 subl(idx, 4); 7948 7949 movq(yz_idx1, Address(y, idx, Address::times_4, 8)); 7950 rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 7951 movq(yz_idx2, Address(y, idx, Address::times_4, 0)); 7952 rorxq(yz_idx2, yz_idx2, 32); 7953 7954 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 7955 mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp 7956 7957 movq(yz_idx1, Address(z, idx, Address::times_4, 8)); 7958 rorxq(yz_idx1, yz_idx1, 32); 7959 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 7960 rorxq(yz_idx2, yz_idx2, 32); 7961 7962 if (VM_Version::supports_adx()) { 7963 adcxq(tmp3, carry); 7964 adoxq(tmp3, yz_idx1); 7965 7966 adcxq(tmp4, tmp); 7967 adoxq(tmp4, yz_idx2); 7968 7969 movl(carry, 0); // does not affect flags 7970 adcxq(carry2, carry); 7971 adoxq(carry2, carry); 7972 } else { 7973 add2_with_carry(tmp4, tmp3, carry, yz_idx1); 7974 add2_with_carry(carry2, tmp4, tmp, yz_idx2); 7975 } 7976 movq(carry, carry2); 7977 7978 movl(Address(z, idx, Address::times_4, 12), tmp3); 7979 shrq(tmp3, 32); 7980 movl(Address(z, idx, Address::times_4, 8), tmp3); 7981 7982 movl(Address(z, idx, Address::times_4, 4), tmp4); 7983 shrq(tmp4, 32); 7984 movl(Address(z, idx, Address::times_4, 0), tmp4); 7985 7986 jmp(L_third_loop); 7987 7988 bind (L_third_loop_exit); 7989 7990 andl (idx, 0x3); 7991 jcc(Assembler::zero, L_post_third_loop_done); 7992 7993 Label L_check_1; 7994 subl(idx, 2); 7995 jcc(Assembler::negative, L_check_1); 7996 7997 movq(yz_idx1, Address(y, idx, Address::times_4, 0)); 7998 rorxq(yz_idx1, yz_idx1, 32); 7999 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 8000 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 8001 rorxq(yz_idx2, yz_idx2, 32); 8002 8003 add2_with_carry(tmp4, tmp3, carry, yz_idx2); 8004 8005 movl(Address(z, idx, Address::times_4, 4), tmp3); 8006 shrq(tmp3, 32); 8007 movl(Address(z, idx, Address::times_4, 0), tmp3); 8008 movq(carry, tmp4); 8009 8010 bind (L_check_1); 8011 addl (idx, 0x2); 8012 andl (idx, 0x1); 8013 subl(idx, 1); 8014 jcc(Assembler::negative, L_post_third_loop_done); 8015 movl(tmp4, Address(y, idx, Address::times_4, 0)); 8016 mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3 8017 movl(tmp4, Address(z, idx, Address::times_4, 0)); 8018 8019 add2_with_carry(carry2, tmp3, tmp4, carry); 8020 8021 movl(Address(z, idx, 

/**
 * Code for BigInteger::multiplyToLen() intrinsic.
 *
 * rdi: x
 * rax: xlen
 * rsi: y
 * rcx: ylen
 * r8:  z
 * r11: tmp0
 * r12: tmp1
 * r13: tmp2
 * r14: tmp3
 * r15: tmp4
 * rbx: tmp5
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
  ShortBranchVerifier sbv(this);
  assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);

  push(tmp0);
  push(tmp1);
  push(tmp2);
  push(tmp3);
  push(tmp4);
  push(tmp5);

  push(xlen);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product = xlen;
  const Register x_xstart = tmp0;

  // First Loop.
  //
  //  final static long LONG_MASK = 0xffffffffL;
  //  int xstart = xlen - 1;
  //  int ystart = ylen - 1;
  //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //    z[kdx] = (int)product;
  //    carry = product >>> 32;
  //  }
  //  z[xstart] = (int)carry;
  //

  movl(idx, ylen);               // idx = ylen;
  lea(kdx, Address(xlen, ylen)); // kdx = xlen+ylen;
  xorq(carry, carry);            // carry = 0;

  Label L_done;

  movl(xstart, xlen);
  decrementl(xstart);
  jcc(Assembler::negative, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  Label L_second_loop;
  testl(kdx, kdx);
  jcc(Assembler::zero, L_second_loop);

  Label L_carry;
  subl(kdx, 1);
  jcc(Assembler::zero, L_carry);

  movl(Address(z, kdx, Address::times_4, 0), carry);
  shrq(carry, 32);
  subl(kdx, 1);

  bind(L_carry);
  movl(Address(z, kdx, Address::times_4, 0), carry);

  // Second and third (nested) loops.
  //
  // for (int i = xstart-1; i >= 0; i--) { // Second loop
  //   carry = 0;
  //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
  //                    (z[k] & LONG_MASK) + carry;
  //     z[k] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[i] = (int)carry;
  // }
  //
  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx

  const Register jdx = tmp1;

  bind(L_second_loop);
  xorl(carry, carry);    // carry = 0;
  movl(jdx, ylen);       // j = ystart+1

  subl(xstart, 1);       // i = xstart-1;
  jcc(Assembler::negative, L_done);

  push(z);

  Label L_last_x;
  lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
  subl(xstart, 1);       // i = xstart-1;
  jcc(Assembler::negative, L_last_x);

  if (UseBMI2Instructions) {
    movq(rdx, Address(x, xstart, Address::times_4, 0));
    rorxq(rdx, rdx, 32); // convert big-endian to little-endian
  } else {
    movq(x_xstart, Address(x, xstart, Address::times_4, 0));
    rorq(x_xstart, 32);  // convert big-endian to little-endian
  }

  Label L_third_loop_prologue;
  bind(L_third_loop_prologue);

  push(x);
  push(xstart);
  push(ylen);

  if (UseBMI2Instructions) {
    multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
  } else { // !UseBMI2Instructions
    multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
  }

  pop(ylen);
  pop(xlen);
  pop(x);
  pop(z);

  movl(tmp3, xlen);
  addl(tmp3, 1);
  movl(Address(z, tmp3, Address::times_4, 0), carry);
  subl(tmp3, 1);
  jccb(Assembler::negative, L_done);

  shrq(carry, 32);
  movl(Address(z, tmp3, Address::times_4, 0), carry);
  jmp(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);
  if (UseBMI2Instructions) {
    movl(rdx, Address(x, 0));
  } else {
    movl(x_xstart, Address(x, 0));
  }
  jmp(L_third_loop_prologue);

  bind(L_done);

  pop(xlen);

  pop(tmp5);
  pop(tmp4);
  pop(tmp3);
  pop(tmp2);
  pop(tmp1);
  pop(tmp0);
}
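
// Loop-structure sketch for multiply_to_len above: x and y are big-endian
// jint arrays and z receives xlen + ylen ints. The first loop multiplies the
// least-significant 64-bit limb of x against all of y to seed the low half of
// z; the second loop then walks the remaining limbs of x toward index 0, and
// each third-loop pass multiply-accumulates one x limb against y into z with
// a running 64-bit carry, mirroring the commented Java code above.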

void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                                         Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2) {
  assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
  Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
  Label VECTOR8_TAIL, VECTOR4_TAIL;
  Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
  Label SAME_TILL_END, DONE;
  Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL;

  // scale is in rcx in both Win64 and Unix
  ShortBranchVerifier sbv(this);

  shlq(length);
  xorq(result, result);

  if ((AVX3Threshold == 0) && (UseAVX > 2) &&
      VM_Version::supports_avx512vlbw()) {
    Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;

    cmpq(length, 64);
    jcc(Assembler::less, VECTOR32_TAIL);

    movq(tmp1, length);
    andq(tmp1, 0x3F);      // tail count
    andq(length, ~(0x3F)); // vector count

    bind(VECTOR64_LOOP);
    // AVX512 code to compare 64 byte vectors.
    evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit);
    evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
    kortestql(k7, k7);
    jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch
    addq(result, 64);
    subq(length, 64);
    jccb(Assembler::notZero, VECTOR64_LOOP);

    //bind(VECTOR64_TAIL);
    testq(tmp1, tmp1);
    jcc(Assembler::zero, SAME_TILL_END);

    //bind(VECTOR64_TAIL);
    // AVX512 code to compare up to 63 byte vectors.
    mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
    shlxq(tmp2, tmp2, tmp1);
    notq(tmp2);
    kmovql(k3, tmp2);

    evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit);
    evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit);

    ktestql(k7, k3);
    jcc(Assembler::below, SAME_TILL_END); // not mismatch

    bind(VECTOR64_NOT_EQUAL);
    kmovql(tmp1, k7);
    notq(tmp1);
    tzcntq(tmp1, tmp1);
    addq(result, tmp1);
    shrq(result);
    jmp(DONE);
    bind(VECTOR32_TAIL);
  }

  cmpq(length, 8);
  jcc(Assembler::equal, VECTOR8_LOOP);
  jcc(Assembler::less, VECTOR4_TAIL);

  if (UseAVX >= 2) {
    Label VECTOR16_TAIL, VECTOR32_LOOP;

    cmpq(length, 16);
    jcc(Assembler::equal, VECTOR16_LOOP);
    jcc(Assembler::less, VECTOR8_LOOP);

    cmpq(length, 32);
    jccb(Assembler::less, VECTOR16_TAIL);

    subq(length, 32);
    bind(VECTOR32_LOOP);
    vmovdqu(rymm0, Address(obja, result));
    vmovdqu(rymm1, Address(objb, result));
    vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit);
    vptest(rymm2, rymm2);
    jcc(Assembler::notZero, VECTOR32_NOT_EQUAL); // mismatch found
    addq(result, 32);
    subq(length, 32);
    jcc(Assembler::greaterEqual, VECTOR32_LOOP);
    addq(length, 32);
    jcc(Assembler::equal, SAME_TILL_END);
    // fall through if fewer than 32 bytes remain
    bind(VECTOR16_TAIL);
    cmpq(length, 16);
    jccb(Assembler::less, VECTOR8_TAIL);
    bind(VECTOR16_LOOP);
    movdqu(rymm0, Address(obja, result));
    movdqu(rymm1, Address(objb, result));
    vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit);
    ptest(rymm2, rymm2);
    jcc(Assembler::notZero, VECTOR16_NOT_EQUAL); // mismatch found
    addq(result, 16);
    subq(length, 16);
    jcc(Assembler::equal, SAME_TILL_END);
    // falling through if less than 16 bytes left
  } else { // regular intrinsics

    cmpq(length, 16);
    jccb(Assembler::less, VECTOR8_TAIL);

    subq(length, 16);
    bind(VECTOR16_LOOP);
    movdqu(rymm0, Address(obja, result));
    movdqu(rymm1, Address(objb, result));
    pxor(rymm0, rymm1);
    ptest(rymm0, rymm0);
    jcc(Assembler::notZero, VECTOR16_NOT_EQUAL); // mismatch found
    addq(result, 16);
    subq(length, 16);
    jccb(Assembler::greaterEqual, VECTOR16_LOOP);
    addq(length, 16);
    jcc(Assembler::equal, SAME_TILL_END);
    // falling through if less than 16 bytes left
  }

  bind(VECTOR8_TAIL);
  cmpq(length, 8);
  jccb(Assembler::less, VECTOR4_TAIL);
  bind(VECTOR8_LOOP);
  movq(tmp1, Address(obja, result));
  movq(tmp2, Address(objb, result));
  xorq(tmp1, tmp2);
  testq(tmp1, tmp1);
  jcc(Assembler::notZero, VECTOR8_NOT_EQUAL); // mismatch found
  addq(result, 8);
  subq(length, 8);
  jcc(Assembler::equal, SAME_TILL_END);
  // falling through if less than 8 bytes left

  bind(VECTOR4_TAIL);
  cmpq(length, 4);
  jccb(Assembler::less, BYTES_TAIL);
  bind(VECTOR4_LOOP);
  movl(tmp1, Address(obja, result));
  xorl(tmp1, Address(objb, result));
  testl(tmp1, tmp1);
  jcc(Assembler::notZero, VECTOR4_NOT_EQUAL); // mismatch found
  addq(result, 4);
  subq(length, 4);
  jcc(Assembler::equal, SAME_TILL_END);
  // falling through if less than 4 bytes left

  bind(BYTES_TAIL);
  bind(BYTES_LOOP);
  load_unsigned_byte(tmp1, Address(obja, result));
  load_unsigned_byte(tmp2, Address(objb, result));
  xorl(tmp1, tmp2);
  testl(tmp1, tmp1);
  jcc(Assembler::notZero, BYTES_NOT_EQUAL); // mismatch found
  decq(length);
  jcc(Assembler::zero, SAME_TILL_END);
  incq(result);
  load_unsigned_byte(tmp1, Address(obja, result));
  load_unsigned_byte(tmp2, Address(objb, result));
  xorl(tmp1, tmp2);
  testl(tmp1, tmp1);
  jcc(Assembler::notZero, BYTES_NOT_EQUAL); // mismatch found
  decq(length);
  jcc(Assembler::zero, SAME_TILL_END);
  incq(result);
  load_unsigned_byte(tmp1, Address(obja, result));
  load_unsigned_byte(tmp2, Address(objb, result));
  xorl(tmp1, tmp2);
  testl(tmp1, tmp1);
  jcc(Assembler::notZero, BYTES_NOT_EQUAL); // mismatch found
  jmp(SAME_TILL_END);

  if (UseAVX >= 2) {
    bind(VECTOR32_NOT_EQUAL);
    vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit);
    vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit);
    vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit);
    vpmovmskb(tmp1, rymm0);
    bsfq(tmp1, tmp1);
    addq(result, tmp1);
    shrq(result);
    jmp(DONE);
  }

  bind(VECTOR16_NOT_EQUAL);
  if (UseAVX >= 2) {
    vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit);
    vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit);
    pxor(rymm0, rymm2);
  } else {
    pcmpeqb(rymm2, rymm2);
    pxor(rymm0, rymm1);
    pcmpeqb(rymm0, rymm1);
    pxor(rymm0, rymm2);
  }
  pmovmskb(tmp1, rymm0);
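  // pmovmskb left one bit per byte where the chunks differ; bsf below finds
  // the first differing byte, and the final shrq (by the log2 scale in rcx)
  // converts the accumulated byte offset in result to an element index.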
  bsfq(tmp1, tmp1);
  addq(result, tmp1);
  shrq(result);
  jmpb(DONE);

  bind(VECTOR8_NOT_EQUAL);
  bind(VECTOR4_NOT_EQUAL);
  bsfq(tmp1, tmp1);
  shrq(tmp1, 3);
  addq(result, tmp1);
  bind(BYTES_NOT_EQUAL);
  shrq(result);
  jmpb(DONE);

  bind(SAME_TILL_END);
  mov64(result, -1);

  bind(DONE);
}

// Helper functions for square_to_len()

/**
 * Store the squares of x[], right shifted one bit (divided by 2) into z[]
 * Preserves x and z and modifies rest of the registers.
 */
void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
  // Perform square and right shift by 1
  // Handle odd xlen case first, then for even xlen do the following
  // jlong carry = 0;
  // for (int j=0, i=0; j < xlen; j+=2, i+=4) {
  //   huge_128 product = x[j:j+1] * x[j:j+1];
  //   z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
  //   z[i+2:i+3] = (jlong)(product >>> 1);
  //   carry = (jlong)product;
  // }

  xorq(tmp5, tmp5);     // carry
  xorq(rdxReg, rdxReg);
  xorl(tmp1, tmp1);     // index for x
  xorl(tmp4, tmp4);     // index for z

  Label L_first_loop, L_first_loop_exit;

  testl(xlen, 1);
  jccb(Assembler::zero, L_first_loop); // jump if xlen is even

  // Square and right shift by 1 the odd element using 32 bit multiply
  movl(raxReg, Address(x, tmp1, Address::times_4, 0));
  imulq(raxReg, raxReg);
  shrq(raxReg, 1);
  adcq(tmp5, 0);
  movq(Address(z, tmp4, Address::times_4, 0), raxReg);
  incrementl(tmp1);
  addl(tmp4, 2);

  // Square and right shift by 1 the rest using 64 bit multiply
  bind(L_first_loop);
  cmpptr(tmp1, xlen);
  jccb(Assembler::equal, L_first_loop_exit);

  // Square
  movq(raxReg, Address(x, tmp1, Address::times_4, 0));
  rorq(raxReg, 32);    // convert big-endian to little-endian
  mulq(raxReg);        // 64-bit multiply rax * rax -> rdx:rax

  // Right shift by 1 and save carry
  shrq(tmp5, 1);       // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
  rcrq(rdxReg, 1);
  rcrq(raxReg, 1);
  adcq(tmp5, 0);

  // Store result in z
  movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
  movq(Address(z, tmp4, Address::times_4, 8), raxReg);

  // Update indices for x and z
  addl(tmp1, 2);
  addl(tmp4, 4);
  jmp(L_first_loop);

  bind(L_first_loop_exit);
}


/**
 * Perform the following multiply add operation using BMI2 instructions
 * carry:sum = sum + op1*op2 + carry
 * op2 should be in rdx
 * op2 is preserved, all other registers are modified
 */
void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
  // assert op2 is rdx
  mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1
  addq(sum, carry);
  adcq(tmp2, 0);
  addq(sum, op1);
  adcq(tmp2, 0);
  movq(carry, tmp2);
}

/**
 * Perform the following multiply add operation:
 * carry:sum = sum + op1*op2 + carry
 * Preserves op1, op2 and modifies rest of registers
 */
void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
  // rdx:rax = op1 * op2
  movq(raxReg, op2);
  mulq(op1);

  // rdx:rax = sum + carry + rdx:rax
  addq(sum, carry);
  adcq(rdxReg, 0);
  addq(sum, raxReg);
  adcq(rdxReg, 0);

  // carry:sum = rdx:sum
  movq(carry, rdxReg);
}

/**
 * Add 64 bit long carry into z[] with carry propagation.
 * Preserves z and carry register values and modifies rest of registers.
 *
 */
void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
  Label L_fourth_loop, L_fourth_loop_exit;

  movl(tmp1, 1);
  subl(zlen, 2);
  addq(Address(z, zlen, Address::times_4, 0), carry);

  bind(L_fourth_loop);
  jccb(Assembler::carryClear, L_fourth_loop_exit);
  subl(zlen, 2);
  jccb(Assembler::negative, L_fourth_loop_exit);
  addq(Address(z, zlen, Address::times_4, 0), tmp1);
  jmp(L_fourth_loop);
  bind(L_fourth_loop_exit);
}

/**
 * Shift z[] left by 1 bit.
 * Preserves x, len, z and zlen registers and modifies rest of the registers.
 *
 */
void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {

  Label L_fifth_loop, L_fifth_loop_exit;

  // Fifth loop
  // Perform primitiveLeftShift(z, zlen, 1)

  const Register prev_carry = tmp1;
  const Register new_carry = tmp4;
  const Register value = tmp2;
  const Register zidx = tmp3;

  // int zidx, carry;
  // long value;
  // carry = 0;
  // for (zidx = zlen-2; zidx >= 0; zidx -= 2) {
  //   (carry:value) = (z[zidx] << 1) | carry;
  //   z[zidx] = value;
  // }

  movl(zidx, zlen);
  xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register

  bind(L_fifth_loop);
  decl(zidx); // Use decl to preserve carry flag
  decl(zidx);
  jccb(Assembler::negative, L_fifth_loop_exit);

  if (UseBMI2Instructions) {
    movq(value, Address(z, zidx, Address::times_4, 0));
    rclq(value, 1);
    rorxq(value, value, 32);
    movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form
  }
  else {
    // clear new_carry
    xorl(new_carry, new_carry);

    // Shift z[i] by 1, or in previous carry and save new carry
    movq(value, Address(z, zidx, Address::times_4, 0));
    shlq(value, 1);
    adcl(new_carry, 0);

    orq(value, prev_carry);
    rorq(value, 0x20);
    movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form

    // Set previous carry = new carry
    movl(prev_carry, new_carry);
  }
  jmp(L_fifth_loop);

  bind(L_fifth_loop_exit);
}


/**
 * Code for BigInteger::squareToLen() intrinsic
 *
 * rdi: x
 * rsi: len
 * r8:  z
 * rcx: zlen
 * r12: tmp1
 * r13: tmp2
 * r14: tmp3
 * r15: tmp4
 * rbx: tmp5
 *
 */
void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {

  Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply;
  push(tmp1);
  push(tmp2);
  push(tmp3);
  push(tmp4);
  push(tmp5);

  // First loop
  // Store the squares, right shifted one bit (i.e., divided by 2).
  square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);

  // Add in off-diagonal sums.
  //
  // Second, third (nested) and fourth loops.
  // zlen +=2;
  // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
  //   carry = 0;
  //   long op2 = x[xidx:xidx+1];
  //   for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
  //     k -= 2;
  //     long op1 = x[j:j+1];
  //     long sum = z[k:k+1];
  //     carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
  //     z[k:k+1] = sum;
  //   }
  //   add_one_64(z, k, carry, tmp_regs);
  // }

  const Register carry = tmp5;
  const Register sum = tmp3;
  const Register op1 = tmp4;
  Register op2 = tmp2;

  push(zlen);
  push(len);
  addl(zlen, 2);
  bind(L_second_loop);
  xorq(carry, carry);
  subl(zlen, 4);
  subl(len, 2);
  push(zlen);
  push(len);
  cmpl(len, 0);
  jccb(Assembler::lessEqual, L_second_loop_exit);

  // Multiply an array by one 64 bit long.
  if (UseBMI2Instructions) {
    op2 = rdxReg;
    movq(op2, Address(x, len, Address::times_4, 0));
    rorxq(op2, op2, 32);
  }
  else {
    movq(op2, Address(x, len, Address::times_4, 0));
    rorq(op2, 32);
  }

  bind(L_third_loop);
  decrementl(len);
  jccb(Assembler::negative, L_third_loop_exit);
  decrementl(len);
  jccb(Assembler::negative, L_last_x);

  movq(op1, Address(x, len, Address::times_4, 0));
  rorq(op1, 32);

  bind(L_multiply);
  subl(zlen, 2);
  movq(sum, Address(z, zlen, Address::times_4, 0));

  // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry.
  if (UseBMI2Instructions) {
    multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
  }
  else {
    multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
  }

  movq(Address(z, zlen, Address::times_4, 0), sum);

  jmp(L_third_loop);
  bind(L_third_loop_exit);

  // Fourth loop
  // Add 64 bit long carry into z with carry propagation.
  // Uses offsetted zlen.
  add_one_64(z, zlen, carry, tmp1);

  pop(len);
  pop(zlen);
  jmp(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);
  movl(op1, Address(x, 0));
  jmp(L_multiply);

  bind(L_second_loop_exit);
  pop(len);
  pop(zlen);
  pop(len);
  pop(zlen);

  // Fifth loop
  // Shift z left 1 bit.
  lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);

  // z[zlen-1] |= x[len-1] & 1;
  movl(tmp3, Address(x, len, Address::times_4, -4));
  andl(tmp3, 1);
  orl(Address(z, zlen, Address::times_4, -4), tmp3);

  pop(tmp5);
  pop(tmp4);
  pop(tmp3);
  pop(tmp2);
  pop(tmp1);
}

/**
 * Helper function for mul_add()
 * Multiply the in[] by int k and add to out[] starting at offset offs using
 * 128 bit by 32 bit multiply and return the carry in tmp5.
 * Only quad int aligned length of in[] is operated on in this function.
 * k is in rdxReg for BMI2Instructions, for others it is in tmp2.
 * This function preserves out, in and k registers.
 * len and offset point to the appropriate index in "in" & "out" respectively.
 * tmp5 has the carry.
 * other registers are temporary and are modified.
 *
 */
void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in,
                                           Register offset, Register len, Register tmp1, Register tmp2, Register tmp3,
                                           Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {

  Label L_first_loop, L_first_loop_exit;

  movl(tmp1, len);
  shrl(tmp1, 2);

  bind(L_first_loop);
  subl(tmp1, 1);
  jccb(Assembler::negative, L_first_loop_exit);

  subl(len, 4);
  subl(offset, 4);

  Register op2 = tmp2;
  const Register sum = tmp3;
  const Register op1 = tmp4;
  const Register carry = tmp5;

  if (UseBMI2Instructions) {
    op2 = rdxReg;
  }

  movq(op1, Address(in, len, Address::times_4, 8));
  rorq(op1, 32);
  movq(sum, Address(out, offset, Address::times_4, 8));
  rorq(sum, 32);
  if (UseBMI2Instructions) {
    multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
  }
  else {
    multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
  }
  // Store back in big endian from little endian
  rorq(sum, 0x20);
  movq(Address(out, offset, Address::times_4, 8), sum);

  movq(op1, Address(in, len, Address::times_4, 0));
  rorq(op1, 32);
  movq(sum, Address(out, offset, Address::times_4, 0));
  rorq(sum, 32);
  if (UseBMI2Instructions) {
    multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
  }
  else {
    multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
  }
  // Store back in big endian from little endian
  rorq(sum, 0x20);
  movq(Address(out, offset, Address::times_4, 0), sum);

  jmp(L_first_loop);
  bind(L_first_loop_exit);
}

/**
 * Code for BigInteger::mulAdd() intrinsic
 *
 * rdi: out
 * rsi: in
 * r11: offs (out.length - offset)
 * rcx: len
 * r8:  k
 * r12: tmp1
 * r13: tmp2
 * r14: tmp3
 * r15: tmp4
 * rbx: tmp5
 * Multiply the in[] by word k and add to out[], return the carry in rax
 */
void MacroAssembler::mul_add(Register out, Register in, Register offs,
                             Register len, Register k, Register tmp1, Register tmp2, Register tmp3,
                             Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {

  Label L_carry, L_last_in, L_done;

  // carry = 0;
  // for (int j=len-1; j >= 0; j--) {
  //   long product = (in[j] & LONG_MASK) * kLong +
  //                  (out[offs] & LONG_MASK) + carry;
  //   out[offs--] = (int)product;
  //   carry = product >>> 32;
  // }
  //
  push(tmp1);
  push(tmp2);
  push(tmp3);
  push(tmp4);
  push(tmp5);

  Register op2 = tmp2;
  const Register sum = tmp3;
  const Register op1 = tmp4;
  const Register carry = tmp5;

  if (UseBMI2Instructions) {
    op2 = rdxReg;
    movl(op2, k);
  }
  else {
    movl(op2, k);
  }

  xorq(carry, carry);

  // First loop

  // Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply
  // The carry is in tmp5
  mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg);

  // Multiply the trailing in[] entry using 64 bit by 32 bit, if any
  decrementl(len);
  jccb(Assembler::negative, L_carry);
  decrementl(len);
  jccb(Assembler::negative, L_last_in);

  movq(op1, Address(in, len, Address::times_4, 0));
  rorq(op1, 32);

  subl(offs, 2);
  movq(sum, Address(out, offs, Address::times_4, 0));
  rorq(sum, 32);

  if (UseBMI2Instructions) {
    multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
  }
  else {
    multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
  }

  // Store back in big endian from little endian
  rorq(sum, 0x20);
  movq(Address(out, offs, Address::times_4, 0), sum);

  testl(len, len);
  jccb(Assembler::zero, L_carry);

  // Multiply the last in[] entry, if any
  bind(L_last_in);
  movl(op1, Address(in, 0));
  movl(sum, Address(out, offs, Address::times_4, -4));

  movl(raxReg, k);
  mull(op1); // tmp4 * eax -> edx:eax
  addl(sum, carry);
  adcl(rdxReg, 0);
  addl(sum, raxReg);
  adcl(rdxReg, 0);
  movl(carry, rdxReg);

  movl(Address(out, offs, Address::times_4, -4), sum);

  bind(L_carry);
  // return tmp5/carry as carry in rax
  movl(rax, carry);

  bind(L_done);
  pop(tmp5);
  pop(tmp4);
  pop(tmp3);
  pop(tmp2);
  pop(tmp1);
}
#endif

/**
 * Emits code to update CRC-32 with a byte value according to constants in table
 *
 * @param [in,out]crc Register containing the crc.
 * @param [in]val     Register containing the byte to fold into the CRC.
 * @param [in]table   Register containing the table of crc constants.
 *
 * uint32_t crc;
 * val = crc_table[(val ^ crc) & 0xFF];
 * crc = val ^ (crc >> 8);
 *
 */
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
  xorl(val, crc);
  andl(val, 0xFF);
  shrl(crc, 8); // unsigned shift
  xorl(crc, Address(table, val, Address::times_4, 0));
}

/**
 * Fold 128-bit data chunk
 */
void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
  if (UseAVX > 0) {
    vpclmulhdq(xtmp, xK, xcrc); // [123:64]
    vpclmulldq(xcrc, xK, xcrc); // [63:0]
    vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */);
    pxor(xcrc, xtmp);
  } else {
    movdqa(xtmp, xcrc);
    pclmulhdq(xtmp, xK); // [123:64]
    pclmulldq(xcrc, xK); // [63:0]
    pxor(xcrc, xtmp);
    movdqu(xtmp, Address(buf, offset));
    pxor(xcrc, xtmp);
  }
}

void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
  if (UseAVX > 0) {
    vpclmulhdq(xtmp, xK, xcrc);
    vpclmulldq(xcrc, xK, xcrc);
    pxor(xcrc, xbuf);
    pxor(xcrc, xtmp);
  } else {
    movdqa(xtmp, xcrc);
    pclmulhdq(xtmp, xK);
    pclmulldq(xcrc, xK);
    pxor(xcrc, xbuf);
    pxor(xcrc, xtmp);
  }
}

/**
 * 8-bit folds to compute 32-bit CRC
 *
 * uint64_t xcrc;
 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
 */
void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
  movdl(tmp, xcrc);
  andl(tmp, 0xFF);
  movdl(xtmp, Address(table, tmp, Address::times_4, 0));
  psrldq(xcrc, 1); // unsigned shift one byte
  pxor(xcrc, xtmp);
}

/**
 * uint32_t crc;
 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
 */
void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
  movl(tmp, crc);
  andl(tmp, 0xFF);
  shrl(crc, 8);
  xorl(crc, Address(table, tmp, Address::times_4, 0));
}

/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register that will contain address of CRC table
 * @param tmp   scratch register
 */
void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
  assert_different_registers(crc, buf, len, table, tmp, rax);

  Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
  Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;

  // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
  // context for the registers used, where all instructions below are using 128-bit mode
  // On EVEX without VL and BW, these instructions will all be AVX.
  lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
  notl(crc); // ~crc
  cmpl(len, 16);
  jcc(Assembler::less, L_tail);

  // Align buffer to 16 bytes
  movl(tmp, buf);
  andl(tmp, 0xF);
  jccb(Assembler::zero, L_aligned);
  subl(tmp, 16);
  addl(len, tmp);

  align(4);
  BIND(L_align_loop);
  movsbl(rax, Address(buf, 0)); // load byte with sign extension
  update_byte_crc32(crc, rax, table);
  increment(buf);
  incrementl(tmp);
  jccb(Assembler::less, L_align_loop);

  BIND(L_aligned);
  movl(tmp, len); // save
  shrl(len, 4);
  jcc(Assembler::zero, L_tail_restore);

  // Fold crc into first bytes of vector
  movdqa(xmm1, Address(buf, 0));
  movdl(rax, xmm1);
  xorl(crc, rax);
  if (VM_Version::supports_sse4_1()) {
    pinsrd(xmm1, crc, 0);
  } else {
    pinsrw(xmm1, crc, 0);
    shrl(crc, 16);
    pinsrw(xmm1, crc, 1);
  }
  addptr(buf, 16);
  subl(len, 4); // len > 0
  jcc(Assembler::less, L_fold_tail);

  movdqa(xmm2, Address(buf,  0));
  movdqa(xmm3, Address(buf, 16));
  movdqa(xmm4, Address(buf, 32));
  addptr(buf, 48);
  subl(len, 3);
  jcc(Assembler::lessEqual, L_fold_512b);

  // Fold total 512 bits of polynomial on each iteration,
  // 128 bits per each of 4 parallel streams.
  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1);

  align32();
  BIND(L_fold_512b_loop);
  fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
  fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
  fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
  fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
  addptr(buf, 64);
  subl(len, 4);
  jcc(Assembler::greater, L_fold_512b_loop);

  // Fold 512 bits to 128 bits.
  BIND(L_fold_512b);
  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);

  // Fold the rest of 128 bits data chunks
  BIND(L_fold_tail);
  addl(len, 3);
  jccb(Assembler::lessEqual, L_fold_128b);
  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);

  BIND(L_fold_tail_loop);
  fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
  addptr(buf, 16);
  decrementl(len);
  jccb(Assembler::greater, L_fold_tail_loop);

  // Fold 128 bits in xmm1 down into 32 bits in crc register.
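  // The clmul step below shortens the 128-bit remainder, and the eight 8-bit
  // table folds that follow (four on xmm0, then four on crc) finish the
  // reduction to a 32-bit CRC.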
  BIND(L_fold_128b);
  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1);
  if (UseAVX > 0) {
    vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
    vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
    vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
  } else {
    movdqa(xmm2, xmm0);
    pclmulqdq(xmm2, xmm1, 0x1);
    movdqa(xmm3, xmm0);
    pand(xmm3, xmm2);
    pclmulqdq(xmm0, xmm3, 0x1);
  }
  psrldq(xmm1, 8);
  psrldq(xmm2, 4);
  pxor(xmm0, xmm1);
  pxor(xmm0, xmm2);

  // 8 8-bit folds to compute 32-bit CRC.
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(xmm0, table, xmm1, rax);
  }
  movdl(crc, xmm0); // mov 32 bits to general register
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(crc, table, rax);
  }

  BIND(L_tail_restore);
  movl(len, tmp); // restore
  BIND(L_tail);
  andl(len, 0xf);
  jccb(Assembler::zero, L_exit);

  // Fold the rest of bytes
  align(4);
  BIND(L_tail_loop);
  movsbl(rax, Address(buf, 0)); // load byte with sign extension
  update_byte_crc32(crc, rax, table);
  increment(buf);
  decrementl(len);
  jccb(Assembler::greater, L_tail_loop);

  BIND(L_exit);
  notl(crc); // ~crc
}

#ifdef _LP64
// Helper function for AVX 512 CRC32
// Fold 512-bit data chunks
void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf,
                                             Register pos, int offset) {
  evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit);
  evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64]
  evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0]
  evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */);
  evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */);
}

// Helper function for AVX 512 CRC32
// Compute CRC32 for < 256B buffers
void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos,
                                              Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
                                              Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) {

  Label L_less_than_32, L_exact_16_left, L_less_than_16_left;
  Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left;
  Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2;

  // check if there is enough buffer to be able to fold 16B at a time
  cmpl(len, 32);
  jcc(Assembler::less, L_less_than_32);

  // if there is, load the constants
  movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10
  movdl(xmm0, crc);                      // get the initial crc value
  movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); // load the plaintext
  pxor(xmm7, xmm0);

  // update the buffer pointer
  addl(pos, 16);
  // update the counter; subtract 32 instead of 16 to save one instruction from the loop
  subl(len, 32);
  jmp(L_16B_reduction_loop);

  bind(L_less_than_32);
  // mov initial crc to the return value; this is necessary for zero-length buffers.
  movl(rax, crc);
  testl(len, len);
  jcc(Assembler::equal, L_cleanup);

  movdl(xmm0, crc); // get the initial crc value

  cmpl(len, 16);
  jcc(Assembler::equal, L_exact_16_left);
  jcc(Assembler::less, L_less_than_16_left);

  movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); // load the plaintext
  pxor(xmm7, xmm0); // xor the initial crc value
  addl(pos, 16);
  subl(len, 16);
  movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10
  jmp(L_get_last_two_xmms);

  bind(L_less_than_16_left);
  // use stack space to load data less than 16 bytes; zero-out the 16B in memory first.
  pxor(xmm1, xmm1);
  movptr(tmp1, rsp);
  movdqu(Address(tmp1, 0 * 16), xmm1);

  cmpl(len, 4);
  jcc(Assembler::less, L_only_less_than_4);

  // backup the counter value
  movl(tmp2, len);
  cmpl(len, 8);
  jcc(Assembler::less, L_less_than_8_left);

  // load 8 Bytes
  movq(rax, Address(buf, pos, Address::times_1, 0 * 16));
  movq(Address(tmp1, 0 * 16), rax);
  addptr(tmp1, 8);
  subl(len, 8);
  addl(pos, 8);

  bind(L_less_than_8_left);
  cmpl(len, 4);
  jcc(Assembler::less, L_less_than_4_left);

  // load 4 Bytes
  movl(rax, Address(buf, pos, Address::times_1, 0));
  movl(Address(tmp1, 0 * 16), rax);
  addptr(tmp1, 4);
  subl(len, 4);
  addl(pos, 4);

  bind(L_less_than_4_left);
  cmpl(len, 2);
  jcc(Assembler::less, L_less_than_2_left);

  // load 2 Bytes
  movw(rax, Address(buf, pos, Address::times_1, 0));
  movl(Address(tmp1, 0 * 16), rax);
  addptr(tmp1, 2);
  subl(len, 2);
  addl(pos, 2);

  bind(L_less_than_2_left);
  cmpl(len, 1);
  jcc(Assembler::less, L_zero_left);

  // load 1 Byte
  movb(rax, Address(buf, pos, Address::times_1, 0));
  movb(Address(tmp1, 0 * 16), rax);

  bind(L_zero_left);
  movdqu(xmm7, Address(rsp, 0));
  pxor(xmm7, xmm0); // xor the initial crc value

  lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
  movdqu(xmm0, Address(rax, tmp2));
  pshufb(xmm7, xmm0);
  jmp(L_128_done);

  bind(L_exact_16_left);
  movdqu(xmm7, Address(buf, pos, Address::times_1, 0));
  pxor(xmm7, xmm0); // xor the initial crc value
  jmp(L_128_done);

  bind(L_only_less_than_4);
  cmpl(len, 3);
  jcc(Assembler::less, L_only_less_than_3);

  // load 3 Bytes
  movb(rax, Address(buf, pos, Address::times_1, 0));
  movb(Address(tmp1, 0), rax);

  movb(rax, Address(buf, pos, Address::times_1, 1));
  movb(Address(tmp1, 1), rax);

  movb(rax, Address(buf, pos, Address::times_1, 2));
  movb(Address(tmp1, 2), rax);

  movdqu(xmm7, Address(rsp, 0));
  pxor(xmm7, xmm0); // xor the initial crc value

  pslldq(xmm7, 0x5);
  jmp(L_barrett);
  bind(L_only_less_than_3);
  cmpl(len, 2);
  jcc(Assembler::less, L_only_less_than_2);

  // load 2 Bytes
  movb(rax, Address(buf, pos, Address::times_1, 0));
  movb(Address(tmp1, 0), rax);

  movb(rax, Address(buf, pos, Address::times_1, 1));
  movb(Address(tmp1, 1), rax);

  movdqu(xmm7, Address(rsp, 0));
  pxor(xmm7, xmm0); // xor the initial crc value

  pslldq(xmm7, 0x6);
  jmp(L_barrett);

  bind(L_only_less_than_2);
  // load 1 Byte
  movb(rax, Address(buf, pos, Address::times_1, 0));
  movb(Address(tmp1, 0), rax);

  movdqu(xmm7, Address(rsp, 0));
  pxor(xmm7, xmm0); // xor the initial crc value

  pslldq(xmm7, 0x7);
}

/**
 * Compute CRC32 using AVX512 instructions
 *  param crc    register containing existing CRC (32-bit)
 *  param buf    register pointing to input byte buffer (byte*)
 *  param len    register containing number of bytes
 *  param table  address of crc or crc32c table
 *  param tmp1   scratch register
 *  param tmp2   scratch register
 *  return rax   result register
 *
 * This routine is identical for crc32c with the exception of the precomputed constant
 * table which will be passed as the table argument. The calculation steps are
 * the same for both variants.
 */
void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) {
  assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12);

  Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
  Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
  Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop;
  Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop;
  Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup;

  const Register pos = r12;
  push(r12);
  subptr(rsp, 16 * 2 + 8);

  // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
  // context for the registers used, where all instructions below are using 128-bit mode
  // On EVEX without VL and BW, these instructions will all be AVX.
  movl(pos, 0);

  // check if smaller than 256B
  cmpl(len, 256);
  jcc(Assembler::less, L_less_than_256);

  // load the initial crc value
  movdl(xmm10, crc);

  // receive the initial 64B data, xor the initial crc value
  evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
  evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
  evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit);
  evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); // zmm10 has rk3 and rk4

  subl(len, 256);
  cmpl(len, 256);
  jcc(Assembler::less, L_fold_128_B_loop);

  evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
  evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
  evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); // zmm16 has rk-1 and rk-2
  subl(len, 256);

  bind(L_fold_256_B_loop);
  addl(pos, 256);
  fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64);
  fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64);
  fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64);
  fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64);

  subl(len, 256);
  jcc(Assembler::greaterEqual, L_fold_256_B_loop);

  // Fold 256 into 128
  addl(pos, 256);
  evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit);
  evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit);
  vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC

  evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit);
  evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit);
  vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC

  evmovdquq(xmm0, xmm7, Assembler::AVX_512bit);
  evmovdquq(xmm4, xmm8, Assembler::AVX_512bit);

  addl(len, 128);
  jmp(L_fold_128_B_register);

  // at this section of the code, there is 128 * x + y (0 <= y < 128) bytes of buffer. The fold_128_B_loop
  // loop will fold 128B at a time until we have 128 + y Bytes of buffer

  // fold 128B at a time. This section of the code folds 8 xmm registers in parallel
  bind(L_fold_128_B_loop);
  addl(pos, 128);
  fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64);
  fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64);

  subl(len, 128);
  jcc(Assembler::greaterEqual, L_fold_128_B_loop);

  addl(pos, 128);

  // at this point, the buffer pointer is pointing at the last y Bytes of the buffer, where 0 <= y < 128
  // the 128B of folded data is in 8 of the xmm registers: xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
  bind(L_fold_128_B_register);
  evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16
  evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0
  evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit);
  evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit);
  // save last that has no multiplicand
  vextracti64x2(xmm7, xmm4, 3);

  evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit);
  evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit);
  // Needed later in reduction loop
  movdqu(xmm10, Address(table, 1 * 16));
  vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC
  vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC

  // Swap 1,0,3,2 - 01 00 11 10
  evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit);
  evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit);
  vextracti128(xmm5, xmm8, 1);
  evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit);

  // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop
  // instead of a cmp instruction, we use the negative flag with the jl instruction
  addl(len, 128 - 16);
  jcc(Assembler::less, L_final_reduction_for_128);

  bind(L_16B_reduction_loop);
  vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
  vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
  vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
  movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16));
  vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
  addl(pos, 16);
  subl(len, 16);
  jcc(Assembler::greaterEqual, L_16B_reduction_loop);

  bind(L_final_reduction_for_128);
  addl(len, 16);
  jcc(Assembler::equal, L_128_done);

  bind(L_get_last_two_xmms);
  movdqu(xmm2, xmm7);
  addl(pos, len);
  movdqu(xmm1, Address(buf, pos, Address::times_1, -16));
  subl(pos, len);

  // get rid of the extra data that was loaded before
  // load the shift constant
  lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
  movdqu(xmm0, Address(rax, len));
  addl(rax, len);

  vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
  // Change mask to 512
  vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2);
  vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit);

  blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit);
  vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
  vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
  vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
  vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit);

  bind(L_128_done);
  // compute crc of a 128-bit value
  movdqu(xmm10, Address(table, 3 * 16));
  movdqu(xmm0, xmm7);

  // 64b fold
  vpclmulqdq(xmm7, xmm7, xmm10, 0x0);
  vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit);
  vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);

  // 32b fold
  movdqu(xmm0, xmm7);
  vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit);
  vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
  vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
  jmp(L_barrett);

  bind(L_less_than_256);
  kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup);

  // barrett reduction
  bind(L_barrett);
  vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2);
  movdqu(xmm1, xmm7);
  movdqu(xmm2, xmm7);
  movdqu(xmm10, Address(table, 4 * 16));

  pclmulqdq(xmm7, xmm10, 0x0);
  pxor(xmm7, xmm2);
  vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2);
  movdqu(xmm2, xmm7);
  pclmulqdq(xmm7, xmm10, 0x10);
  pxor(xmm7, xmm2);
  pxor(xmm7, xmm1);
  pextrd(crc, xmm7, 2);

  bind(L_cleanup);
  addptr(rsp, 16 * 2 + 8);
  pop(r12);
}

// S. Gueron / Information Processing Letters 112 (2012) 184
// Algorithm 4: Computing carry-less multiplication using a precomputed lookup table.
// Input: A 32 bit value B = [byte3, byte2, byte1, byte0].
// Output: the 64-bit carry-less product of B * CONST
void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n,
                                     Register tmp1, Register tmp2, Register tmp3) {
  lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
  if (n > 0) {
    addq(tmp3, n * 256 * 8);
  }
  // Q1 = TABLEExt[n][B & 0xFF];
  movl(tmp1, in);
  andl(tmp1, 0x000000FF);
  shll(tmp1, 3);
  addq(tmp1, tmp3);
  movq(tmp1, Address(tmp1, 0));

  // Q2 = TABLEExt[n][B >> 8 & 0xFF];
  movl(tmp2, in);
  shrl(tmp2, 8);
  andl(tmp2, 0x000000FF);
  shll(tmp2, 3);
  addq(tmp2, tmp3);
  movq(tmp2, Address(tmp2, 0));

  shlq(tmp2, 8);
  xorq(tmp1, tmp2);

  // Q3 = TABLEExt[n][B >> 16 & 0xFF];
  movl(tmp2, in);
  shrl(tmp2, 16);
  andl(tmp2, 0x000000FF);
  shll(tmp2, 3);
  addq(tmp2, tmp3);
  movq(tmp2, Address(tmp2, 0));

  shlq(tmp2, 16);
  xorq(tmp1, tmp2);

  // Q4 = TABLEExt[n][B >> 24 & 0xFF];
  shrl(in, 24);
  andl(in, 0x000000FF);
  shll(in, 3);
  addq(in, tmp3);
  movq(in, Address(in, 0));

  shlq(in, 24);
  xorq(in, tmp1);
  // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
}

void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
                                      Register in_out,
                                      uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                                      XMMRegister w_xtmp2,
                                      Register tmp1,
                                      Register n_tmp2, Register n_tmp3) {
  if (is_pclmulqdq_supported) {
    movdl(w_xtmp1, in_out); // modified blindly

    movl(tmp1, const_or_pre_comp_const_index);
    movdl(w_xtmp2, tmp1);
    pclmulqdq(w_xtmp1, w_xtmp2, 0);

    movdq(in_out, w_xtmp1);
  } else {
    crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3);
  }
}

// Recombination Alternative 2: No bit-reflections
// T1 = (CRC_A * U1) << 1
// T2 = (CRC_B * U2) << 1
// C1 = T1 >> 32
// C2 = T2 >> 32
// T1 = T1 & 0xFFFFFFFF
// T2 = T2 & 0xFFFFFFFF
// T1 = CRC32(0, T1)
// T2 = CRC32(0, T2)
// C1 = C1 ^ T1
// C2 = C2 ^ T2
// CRC = C1 ^ C2 ^ CRC_C
void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                                     XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                     Register tmp1, Register tmp2,
                                     Register n_tmp3) {
  crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
  crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
  shlq(in_out, 1);
  movl(tmp1, in_out);
  shrq(in_out, 32);
  xorl(tmp2, tmp2);
  crc32(tmp2, tmp1, 4);
  xorl(in_out, tmp2); // we don't care about upper 32 bit contents here
  shlq(in1, 1);
  movl(tmp1, in1);
  shrq(in1, 32);
  xorl(tmp2, tmp2);
  crc32(tmp2, tmp1, 4);
  xorl(in1, tmp2);
  xorl(in_out, in1);
  xorl(in_out, in2);
}

// Set N to predefined value
// Subtract from a length of a buffer
// execute in a loop:
// CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0
// for i = 1 to N do
//   CRC_A = CRC32(CRC_A, A[i])
//   CRC_B = CRC32(CRC_B, B[i])
//   CRC_C = CRC32(CRC_C, C[i])
// end for
// Recombine
void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                                       Register in_out1, Register in_out2, Register in_out3,
                                       Register tmp1, Register tmp2, Register tmp3,
                                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                       Register tmp4, Register tmp5,
                                       Register n_tmp6) {
  Label L_processPartitions;
  Label L_processPartition;
  Label L_exit;

  bind(L_processPartitions);
  cmpl(in_out1, 3 * size);
  jcc(Assembler::less, L_exit);
  xorl(tmp1, tmp1);
  xorl(tmp2, tmp2);
  movq(tmp3, in_out2);
  addq(tmp3, size);

  bind(L_processPartition);
  crc32(in_out3, Address(in_out2, 0), 8);
  crc32(tmp1, Address(in_out2, size), 8);
  crc32(tmp2, Address(in_out2, size * 2), 8);
  addq(in_out2, 8);
  cmpq(in_out2, tmp3);
  jcc(Assembler::less, L_processPartition);
  crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
                  w_xtmp1, w_xtmp2, w_xtmp3,
                  tmp4, tmp5,
                  n_tmp6);
  addq(in_out2, 2 * size);
  subl(in_out1, 3 * size);
  jmp(L_processPartitions);

  bind(L_exit);
}
#else
void MacroAssembler::crc32c_ipl_alg4(Register in_out, uint32_t n,
                                     Register tmp1, Register tmp2, Register tmp3,
                                     XMMRegister xtmp1, XMMRegister xtmp2) {
  lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
  if (n > 0) {
    addl(tmp3, n * 256 * 8);
  }
  // Q1 = TABLEExt[n][B & 0xFF];
  movl(tmp1, in_out);
  andl(tmp1, 0x000000FF);
  shll(tmp1, 3);
  addl(tmp1, tmp3);
  movq(xtmp1, Address(tmp1, 0));

  // Q2 = TABLEExt[n][B >> 8 & 0xFF];
  movl(tmp2, in_out);
  shrl(tmp2, 8);
  andl(tmp2, 0x000000FF);
  shll(tmp2, 3);
  addl(tmp2, tmp3);
  movq(xtmp2, Address(tmp2, 0));

  psllq(xtmp2, 8);
  pxor(xtmp1, xtmp2);

  // Q3 = TABLEExt[n][B >> 16 & 0xFF];
  movl(tmp2, in_out);
  shrl(tmp2, 16);
  andl(tmp2, 0x000000FF);
  shll(tmp2, 3);
  addl(tmp2, tmp3);
  movq(xtmp2, Address(tmp2, 0));

  psllq(xtmp2, 16);
  pxor(xtmp1, xtmp2);

  // Q4 = TABLEExt[n][B >> 24 & 0xFF];
  shrl(in_out, 24);
  andl(in_out, 0x000000FF);
  shll(in_out, 3);
  addl(in_out, tmp3);
  movq(xtmp2, Address(in_out, 0));

  psllq(xtmp2, 24);
  pxor(xtmp1, xtmp2); // Result in CXMM
  // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
}

void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
                                      Register in_out,
                                      uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                                      XMMRegister w_xtmp2,
                                      Register tmp1,
                                      Register n_tmp2, Register n_tmp3) {
  if (is_pclmulqdq_supported) {
    movdl(w_xtmp1, in_out);

    movl(tmp1, const_or_pre_comp_const_index);
    movdl(w_xtmp2, tmp1);
    pclmulqdq(w_xtmp1, w_xtmp2, 0);
    // Keep result in XMM since GPR is 32 bit in length
  } else {
    crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3, w_xtmp1, w_xtmp2);
  }
}

void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                                     XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                     Register tmp1, Register tmp2,
                                     Register n_tmp3) {
  crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
  crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);

  psllq(w_xtmp1, 1);
  movdl(tmp1, w_xtmp1);
  psrlq(w_xtmp1, 32);
  movdl(in_out, w_xtmp1);

  xorl(tmp2, tmp2);
  crc32(tmp2, tmp1, 4);
  xorl(in_out, tmp2);

  psllq(w_xtmp2, 1);
  movdl(tmp1, w_xtmp2);
  psrlq(w_xtmp2, 32);
  movdl(in1, w_xtmp2);

  xorl(tmp2, tmp2);
  crc32(tmp2, tmp1, 4);
  xorl(in1, tmp2);
  xorl(in_out, in1);
  xorl(in_out, in2);
}

void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                                       Register in_out1, Register in_out2, Register in_out3,
                                       Register tmp1, Register tmp2, Register tmp3,
                                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                       Register tmp4, Register tmp5,
                                       Register n_tmp6) {
  Label L_processPartitions;
  Label L_processPartition;
  Label L_exit;

  bind(L_processPartitions);
  cmpl(in_out1, 3 * size);
  jcc(Assembler::less, L_exit);
  xorl(tmp1, tmp1);
  xorl(tmp2, tmp2);
  movl(tmp3, in_out2);
  addl(tmp3, size);

  bind(L_processPartition);
  crc32(in_out3, Address(in_out2, 0), 4);
  crc32(tmp1, Address(in_out2, size), 4);
  crc32(tmp2, Address(in_out2, size * 2), 4);
  crc32(in_out3, Address(in_out2, 0 + 4), 4);
  crc32(tmp1, Address(in_out2, size + 4), 4);
  crc32(tmp2, Address(in_out2, size * 2 + 4), 4);
  addl(in_out2, 8);
  cmpl(in_out2, tmp3);
  jcc(Assembler::less, L_processPartition);

  push(tmp3);
  push(in_out1);
  push(in_out2);
  tmp4 = tmp3;
  tmp5 = in_out1;
  n_tmp6 = in_out2;

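  // tmp4, tmp5 and n_tmp6 now alias tmp3, in_out1 and in_out2, whose live
  // values were saved on the stack above; this lends crc32c_rec_alt2 the
  // extra scratch registers it needs in the 32-bit case.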
  crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
                  w_xtmp1, w_xtmp2, w_xtmp3,
                  tmp4, tmp5,
                  n_tmp6);

  pop(in_out2);
  pop(in_out1);
  pop(tmp3);

  addl(in_out2, 2 * size);
  subl(in_out1, 3 * size);
  jmp(L_processPartitions);

  bind(L_exit);
}
#endif // LP64

#ifdef _LP64
// Algorithm 2: Pipelined usage of the CRC32 instruction.
// Input: A buffer I of L bytes.
// Output: the CRC32C value of the buffer.
// Notations:
// Write L = 24N + r, with N = floor(L/24).
// r = L mod 24 (0 <= r < 24).
// Consider I as the concatenation of A|B|C|R, where A, B and C each consist
// of N quadwords, and R consists of r bytes.
// A[j] = I [8j+7:8j], j= 0, 1, ..., N-1
// B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1
// C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1
// if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1
void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                                          Register tmp1, Register tmp2, Register tmp3,
                                          Register tmp4, Register tmp5, Register tmp6,
                                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                          bool is_pclmulqdq_supported) {
  uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
  Label L_wordByWord;
  Label L_byteByByteProlog;
  Label L_byteByByte;
  Label L_exit;

  if (is_pclmulqdq_supported) {
    const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
    const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1);

    const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
    const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);

    const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
    const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
    assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\"");
  } else {
    const_or_pre_comp_const_index[0] = 1;
    const_or_pre_comp_const_index[1] = 0;

    const_or_pre_comp_const_index[2] = 3;
    const_or_pre_comp_const_index[3] = 2;

    const_or_pre_comp_const_index[4] = 5;
    const_or_pre_comp_const_index[5] = 4;
  }
  crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  movl(tmp1, in2);
  andl(tmp1, 0x00000007);
  negl(tmp1);
  addl(tmp1, in2);
  addq(tmp1, in1);

  cmpq(in1, tmp1);
  jccb(Assembler::greaterEqual, L_byteByByteProlog);
  align(16);
  BIND(L_wordByWord);
  crc32(in_out, Address(in1, 0), 8);
  addq(in1, 8);
  cmpq(in1, tmp1);
  jcc(Assembler::less, L_wordByWord);

  BIND(L_byteByByteProlog);
  andl(in2, 0x00000007);
  movl(tmp2, 1);

  cmpl(tmp2, in2);
  jccb(Assembler::greater, L_exit);
  BIND(L_byteByByte);
  crc32(in_out, Address(in1, 0), 1);
  incq(in1);
  incl(tmp2);
  cmpl(tmp2, in2);
  jcc(Assembler::lessEqual, L_byteByByte);

  BIND(L_exit);
}
#else
void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                                          Register tmp1, Register tmp2, Register tmp3,
                                          Register tmp4, Register tmp5, Register tmp6,
                                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                          bool is_pclmulqdq_supported) {
  uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
  Label L_wordByWord;
  Label L_byteByByteProlog;
  Label L_byteByByte;
  Label L_exit;

  if (is_pclmulqdq_supported) {
    const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
    const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1);

    const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
    const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);

    const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
    const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
  } else {
    const_or_pre_comp_const_index[0] = 1;
    const_or_pre_comp_const_index[1] = 0;

    const_or_pre_comp_const_index[2] = 3;
    const_or_pre_comp_const_index[3] = 2;

    const_or_pre_comp_const_index[4] = 5;
    const_or_pre_comp_const_index[5] = 4;
  }
  crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  movl(tmp1, in2);
  andl(tmp1, 0x00000007);
  negl(tmp1);
  addl(tmp1, in2);
  addl(tmp1, in1);

  BIND(L_wordByWord);
  cmpl(in1, tmp1);
  jcc(Assembler::greaterEqual, L_byteByByteProlog);
  crc32(in_out, Address(in1, 0), 4);
  addl(in1, 4);
  jmp(L_wordByWord);

  BIND(L_byteByByteProlog);
  andl(in2, 0x00000007);
  movl(tmp2, 1);

  BIND(L_byteByByte);
  cmpl(tmp2, in2);
  jccb(Assembler::greater, L_exit);
  movb(tmp1, Address(in1, 0));
  crc32(in_out, tmp1, 1);
  incl(in1);
  incl(tmp2);
  jmp(L_byteByByte);

  BIND(L_exit);
}
#endif // LP64
#undef BIND
#undef BLOCK_COMMENT

// Compress char[] array to byte[].
// Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
// Return the array length if every element in the array can be encoded,
// otherwise, the index of the first non-latin1 (> 0xff) character.
// @IntrinsicCandidate
// public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
//   for (int i = 0; i < len; i++) {
//     char c = src[srcOff];
//     if (c > 0xff) {
//       return i;  // return index of non-latin1 char
//     }
//     dst[dstOff] = (byte)c;
//     srcOff++;
//     dstOff++;
//   }
//   return len;
// }
void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
                                         XMMRegister tmp1Reg, XMMRegister tmp2Reg,
                                         XMMRegister tmp3Reg, XMMRegister tmp4Reg,
                                         Register tmp5, Register result, KRegister mask1, KRegister mask2) {
  Label copy_chars_loop, done, reset_sp, copy_tail;

  // rsi: src
  // rdi: dst
  // rdx: len
  // rcx: tmp5
  // rax: result

  // rsi holds start addr of source char[] to be compressed
  // rdi holds start addr of destination byte[]
  // rdx holds length

  assert(len != result, "");

  // save length for return
  movl(result, len);

  if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
      VM_Version::supports_avx512vlbw() &&
      VM_Version::supports_bmi2()) {

    Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail;

    // alignment
    Label post_alignment;

    // if length of the string is less than 32, handle it the old-fashioned way
    testl(len, -32);
    jcc(Assembler::zero, below_threshold);

    // First check whether a character is compressible (<= 0xFF).
    // Create mask to test for Unicode chars inside zmm vector
    movl(tmp5, 0x00FF);
    evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit);

    testl(len, -64);
    jccb(Assembler::zero, post_alignment);

    movl(tmp5, dst);
    andl(tmp5, (32 - 1));
    negl(tmp5);
    andl(tmp5, (32 - 1));

    // bail out when there is nothing to be done
    testl(tmp5, 0xFFFFFFFF);
    jccb(Assembler::zero, post_alignment);

    // ~(~0 << len), where len is the # of remaining elements to process
    movl(len, 0xFFFFFFFF);
    shlxl(len, len, tmp5);
    notl(len);
    kmovdl(mask2, len);
    movl(len, result);

    evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
    evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
    ktestd(mask1, mask2);
    jcc(Assembler::carryClear, copy_tail);

    evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);

    addptr(src, tmp5);
    addptr(src, tmp5);
    addptr(dst, tmp5);
    subl(len, tmp5);

    bind(post_alignment);
    // end of alignment

    movl(tmp5, len);
    andl(tmp5, (32 - 1)); // tail count (in chars)
    andl(len, ~(32 - 1)); // vector count (in chars)
    jccb(Assembler::zero, copy_loop_tail);

    lea(src, Address(src, len, Address::times_2));
    lea(dst, Address(dst, len, Address::times_1));
    negptr(len);

    bind(copy_32_loop);
    evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
    evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
    kortestdl(mask1, mask1);
    jccb(Assembler::carryClear, reset_for_copy_tail);

    // All elements in the current processed chunk are valid candidates for
    // compression. Write truncated byte elements to memory.
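    // evpmovwb truncates each word element to its low byte, which is safe
    // here because the unsigned compare above proved every char in this
    // chunk is <= 0xFF.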
10084 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit); 10085 addptr(len, 32); 10086 jccb(Assembler::notZero, copy_32_loop); 10087 10088 bind(copy_loop_tail); 10089 // bail out when there is nothing to be done 10090 testl(tmp5, 0xFFFFFFFF); 10091 jcc(Assembler::zero, done); 10092 10093 movl(len, tmp5); 10094 10095 // ~(~0 << len), where len is the # of remaining elements to process 10096 movl(tmp5, 0xFFFFFFFF); 10097 shlxl(tmp5, tmp5, len); 10098 notl(tmp5); 10099 10100 kmovdl(mask2, tmp5); 10101 10102 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 10103 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 10104 ktestd(mask1, mask2); 10105 jcc(Assembler::carryClear, copy_tail); 10106 10107 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 10108 jmp(done); 10109 10110 bind(reset_for_copy_tail); 10111 lea(src, Address(src, tmp5, Address::times_2)); 10112 lea(dst, Address(dst, tmp5, Address::times_1)); 10113 subptr(len, tmp5); 10114 jmp(copy_chars_loop); 10115 10116 bind(below_threshold); 10117 } 10118 10119 if (UseSSE42Intrinsics) { 10120 Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail; 10121 10122 // vectored compression 10123 testl(len, 0xfffffff8); 10124 jcc(Assembler::zero, copy_tail); 10125 10126 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors 10127 movdl(tmp1Reg, tmp5); 10128 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 10129 10130 andl(len, 0xfffffff0); 10131 jccb(Assembler::zero, copy_16); 10132 10133 // compress 16 chars per iter 10134 pxor(tmp4Reg, tmp4Reg); 10135 10136 lea(src, Address(src, len, Address::times_2)); 10137 lea(dst, Address(dst, len, Address::times_1)); 10138 negptr(len); 10139 10140 bind(copy_32_loop); 10141 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters 10142 por(tmp4Reg, tmp2Reg); 10143 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters 10144 por(tmp4Reg, tmp3Reg); 10145 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector 10146 jccb(Assembler::notZero, reset_for_copy_tail); 10147 packuswb(tmp2Reg, tmp3Reg); // only ASCII chars; compress each to 1 byte 10148 movdqu(Address(dst, len, Address::times_1), tmp2Reg); 10149 addptr(len, 16); 10150 jccb(Assembler::notZero, copy_32_loop); 10151 10152 // compress next vector of 8 chars (if any) 10153 bind(copy_16); 10154 // len = 0 10155 testl(result, 0x00000008); // check if there's a block of 8 chars to compress 10156 jccb(Assembler::zero, copy_tail_sse); 10157 10158 pxor(tmp3Reg, tmp3Reg); 10159 10160 movdqu(tmp2Reg, Address(src, 0)); 10161 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector 10162 jccb(Assembler::notZero, reset_for_copy_tail); 10163 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte 10164 movq(Address(dst, 0), tmp2Reg); 10165 addptr(src, 16); 10166 addptr(dst, 8); 10167 jmpb(copy_tail_sse); 10168 10169 bind(reset_for_copy_tail); 10170 movl(tmp5, result); 10171 andl(tmp5, 0x0000000f); 10172 lea(src, Address(src, tmp5, Address::times_2)); 10173 lea(dst, Address(dst, tmp5, Address::times_1)); 10174 subptr(len, tmp5); 10175 jmpb(copy_chars_loop); 10176 10177 bind(copy_tail_sse); 10178 movl(len, result); 10179 andl(len, 0x00000007); // tail count (in chars) 10180 } 10181 // compress 1 char per iter 10182 bind(copy_tail); 10183 testl(len, len); 10184 jccb(Assembler::zero, done); 10185 lea(src, Address(src, len, 
Address::times_2)); 10186 lea(dst, Address(dst, len, Address::times_1)); 10187 negptr(len); 10188 10189 bind(copy_chars_loop); 10190 load_unsigned_short(tmp5, Address(src, len, Address::times_2)); 10191 testl(tmp5, 0xff00); // check if Unicode char 10192 jccb(Assembler::notZero, reset_sp); 10193 movb(Address(dst, len, Address::times_1), tmp5); // ASCII char; compress to 1 byte 10194 increment(len); 10195 jccb(Assembler::notZero, copy_chars_loop); 10196 10197 // add len then return (len will be zero if compress succeeded, otherwise negative) 10198 bind(reset_sp); 10199 addl(result, len); 10200 10201 bind(done); 10202 } 10203 10204 // Inflate byte[] array to char[]. 10205 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java 10206 // @IntrinsicCandidate 10207 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) { 10208 // for (int i = 0; i < len; i++) { 10209 // dst[dstOff++] = (char)(src[srcOff++] & 0xff); 10210 // } 10211 // } 10212 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 10213 XMMRegister tmp1, Register tmp2, KRegister mask) { 10214 Label copy_chars_loop, done, below_threshold, avx3_threshold; 10215 // rsi: src 10216 // rdi: dst 10217 // rdx: len 10218 // rcx: tmp2 10219 10220 // rsi holds start addr of source byte[] to be inflated 10221 // rdi holds start addr of destination char[] 10222 // rdx holds length 10223 assert_different_registers(src, dst, len, tmp2); 10224 movl(tmp2, len); 10225 if ((UseAVX > 2) && // AVX512 10226 VM_Version::supports_avx512vlbw() && 10227 VM_Version::supports_bmi2()) { 10228 10229 Label copy_32_loop, copy_tail; 10230 Register tmp3_aliased = len; 10231 10232 // if length of the string is less than 16, handle it in an old fashioned way 10233 testl(len, -16); 10234 jcc(Assembler::zero, below_threshold); 10235 10236 testl(len, -1 * AVX3Threshold); 10237 jcc(Assembler::zero, avx3_threshold); 10238 10239 // In order to use only one arithmetic operation for the main loop we use 10240 // this pre-calculation 10241 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop 10242 andl(len, -32); // vector count 10243 jccb(Assembler::zero, copy_tail); 10244 10245 lea(src, Address(src, len, Address::times_1)); 10246 lea(dst, Address(dst, len, Address::times_2)); 10247 negptr(len); 10248 10249 10250 // inflate 32 chars per iter 10251 bind(copy_32_loop); 10252 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit); 10253 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit); 10254 addptr(len, 32); 10255 jcc(Assembler::notZero, copy_32_loop); 10256 10257 bind(copy_tail); 10258 // bail out when there is nothing to be done 10259 testl(tmp2, -1); // we don't destroy the contents of tmp2 here 10260 jcc(Assembler::zero, done); 10261 10262 // ~(~0 << length), where length is the # of remaining elements to process 10263 movl(tmp3_aliased, -1); 10264 shlxl(tmp3_aliased, tmp3_aliased, tmp2); 10265 notl(tmp3_aliased); 10266 kmovdl(mask, tmp3_aliased); 10267 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit); 10268 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit); 10269 10270 jmp(done); 10271 bind(avx3_threshold); 10272 } 10273 if (UseSSE42Intrinsics) { 10274 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail; 10275 10276 if (UseAVX > 1) { 10277 andl(tmp2, (16 - 1)); 10278 andl(len, -16); 10279 jccb(Assembler::zero, copy_new_tail); 10280 } else { 10281 andl(tmp2, 0x00000007); // tail 
count (in chars) 10282 andl(len, 0xfffffff8); // vector count (in chars) 10283 jccb(Assembler::zero, copy_tail); 10284 } 10285 10286 // vectored inflation 10287 lea(src, Address(src, len, Address::times_1)); 10288 lea(dst, Address(dst, len, Address::times_2)); 10289 negptr(len); 10290 10291 if (UseAVX > 1) { 10292 bind(copy_16_loop); 10293 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit); 10294 vmovdqu(Address(dst, len, Address::times_2), tmp1); 10295 addptr(len, 16); 10296 jcc(Assembler::notZero, copy_16_loop); 10297 10298 bind(below_threshold); 10299 bind(copy_new_tail); 10300 movl(len, tmp2); 10301 andl(tmp2, 0x00000007); 10302 andl(len, 0xFFFFFFF8); 10303 jccb(Assembler::zero, copy_tail); 10304 10305 pmovzxbw(tmp1, Address(src, 0)); 10306 movdqu(Address(dst, 0), tmp1); 10307 addptr(src, 8); 10308 addptr(dst, 2 * 8); 10309 10310 jmp(copy_tail, true); 10311 } 10312 10313 // inflate 8 chars per iter 10314 bind(copy_8_loop); 10315 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words 10316 movdqu(Address(dst, len, Address::times_2), tmp1); 10317 addptr(len, 8); 10318 jcc(Assembler::notZero, copy_8_loop); 10319 10320 bind(copy_tail); 10321 movl(len, tmp2); 10322 10323 cmpl(len, 4); 10324 jccb(Assembler::less, copy_bytes); 10325 10326 movdl(tmp1, Address(src, 0)); // load 4 byte chars 10327 pmovzxbw(tmp1, tmp1); 10328 movq(Address(dst, 0), tmp1); 10329 subptr(len, 4); 10330 addptr(src, 4); 10331 addptr(dst, 8); 10332 10333 bind(copy_bytes); 10334 } else { 10335 bind(below_threshold); 10336 } 10337 10338 testl(len, len); 10339 jccb(Assembler::zero, done); 10340 lea(src, Address(src, len, Address::times_1)); 10341 lea(dst, Address(dst, len, Address::times_2)); 10342 negptr(len); 10343 10344 // inflate 1 char per iter 10345 bind(copy_chars_loop); 10346 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char 10347 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word 10348 increment(len); 10349 jcc(Assembler::notZero, copy_chars_loop); 10350 10351 bind(done); 10352 } 10353 10354 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len) { 10355 switch(type) { 10356 case T_BYTE: 10357 case T_BOOLEAN: 10358 evmovdqub(dst, kmask, src, merge, vector_len); 10359 break; 10360 case T_CHAR: 10361 case T_SHORT: 10362 evmovdquw(dst, kmask, src, merge, vector_len); 10363 break; 10364 case T_INT: 10365 case T_FLOAT: 10366 evmovdqul(dst, kmask, src, merge, vector_len); 10367 break; 10368 case T_LONG: 10369 case T_DOUBLE: 10370 evmovdquq(dst, kmask, src, merge, vector_len); 10371 break; 10372 default: 10373 fatal("Unexpected type argument %s", type2name(type)); 10374 break; 10375 } 10376 } 10377 10378 10379 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) { 10380 switch(type) { 10381 case T_BYTE: 10382 case T_BOOLEAN: 10383 evmovdqub(dst, kmask, src, merge, vector_len); 10384 break; 10385 case T_CHAR: 10386 case T_SHORT: 10387 evmovdquw(dst, kmask, src, merge, vector_len); 10388 break; 10389 case T_INT: 10390 case T_FLOAT: 10391 evmovdqul(dst, kmask, src, merge, vector_len); 10392 break; 10393 case T_LONG: 10394 case T_DOUBLE: 10395 evmovdquq(dst, kmask, src, merge, vector_len); 10396 break; 10397 default: 10398 fatal("Unexpected type argument %s", type2name(type)); 10399 break; 10400 } 10401 } 10402 10403 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address 
dst, XMMRegister src, bool merge, int vector_len) { 10404 switch(type) { 10405 case T_BYTE: 10406 case T_BOOLEAN: 10407 evmovdqub(dst, kmask, src, merge, vector_len); 10408 break; 10409 case T_CHAR: 10410 case T_SHORT: 10411 evmovdquw(dst, kmask, src, merge, vector_len); 10412 break; 10413 case T_INT: 10414 case T_FLOAT: 10415 evmovdqul(dst, kmask, src, merge, vector_len); 10416 break; 10417 case T_LONG: 10418 case T_DOUBLE: 10419 evmovdquq(dst, kmask, src, merge, vector_len); 10420 break; 10421 default: 10422 fatal("Unexpected type argument %s", type2name(type)); 10423 break; 10424 } 10425 } 10426 10427 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) { 10428 switch(masklen) { 10429 case 2: 10430 knotbl(dst, src); 10431 movl(rtmp, 3); 10432 kmovbl(ktmp, rtmp); 10433 kandbl(dst, ktmp, dst); 10434 break; 10435 case 4: 10436 knotbl(dst, src); 10437 movl(rtmp, 15); 10438 kmovbl(ktmp, rtmp); 10439 kandbl(dst, ktmp, dst); 10440 break; 10441 case 8: 10442 knotbl(dst, src); 10443 break; 10444 case 16: 10445 knotwl(dst, src); 10446 break; 10447 case 32: 10448 knotdl(dst, src); 10449 break; 10450 case 64: 10451 knotql(dst, src); 10452 break; 10453 default: 10454 fatal("Unexpected vector length %d", masklen); 10455 break; 10456 } 10457 } 10458 10459 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 10460 switch(type) { 10461 case T_BOOLEAN: 10462 case T_BYTE: 10463 kandbl(dst, src1, src2); 10464 break; 10465 case T_CHAR: 10466 case T_SHORT: 10467 kandwl(dst, src1, src2); 10468 break; 10469 case T_INT: 10470 case T_FLOAT: 10471 kanddl(dst, src1, src2); 10472 break; 10473 case T_LONG: 10474 case T_DOUBLE: 10475 kandql(dst, src1, src2); 10476 break; 10477 default: 10478 fatal("Unexpected type argument %s", type2name(type)); 10479 break; 10480 } 10481 } 10482 10483 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 10484 switch(type) { 10485 case T_BOOLEAN: 10486 case T_BYTE: 10487 korbl(dst, src1, src2); 10488 break; 10489 case T_CHAR: 10490 case T_SHORT: 10491 korwl(dst, src1, src2); 10492 break; 10493 case T_INT: 10494 case T_FLOAT: 10495 kordl(dst, src1, src2); 10496 break; 10497 case T_LONG: 10498 case T_DOUBLE: 10499 korql(dst, src1, src2); 10500 break; 10501 default: 10502 fatal("Unexpected type argument %s", type2name(type)); 10503 break; 10504 } 10505 } 10506 10507 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 10508 switch(type) { 10509 case T_BOOLEAN: 10510 case T_BYTE: 10511 kxorbl(dst, src1, src2); 10512 break; 10513 case T_CHAR: 10514 case T_SHORT: 10515 kxorwl(dst, src1, src2); 10516 break; 10517 case T_INT: 10518 case T_FLOAT: 10519 kxordl(dst, src1, src2); 10520 break; 10521 case T_LONG: 10522 case T_DOUBLE: 10523 kxorql(dst, src1, src2); 10524 break; 10525 default: 10526 fatal("Unexpected type argument %s", type2name(type)); 10527 break; 10528 } 10529 } 10530 10531 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10532 switch(type) { 10533 case T_BOOLEAN: 10534 case T_BYTE: 10535 evpermb(dst, mask, nds, src, merge, vector_len); break; 10536 case T_CHAR: 10537 case T_SHORT: 10538 evpermw(dst, mask, nds, src, merge, vector_len); break; 10539 case T_INT: 10540 case T_FLOAT: 10541 evpermd(dst, mask, nds, src, merge, vector_len); break; 10542 case T_LONG: 10543 case T_DOUBLE: 10544 evpermq(dst, mask, nds, src, merge, 
vector_len); break; 10545 default: 10546 fatal("Unexpected type argument %s", type2name(type)); break; 10547 } 10548 } 10549 10550 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10551 switch(type) { 10552 case T_BOOLEAN: 10553 case T_BYTE: 10554 evpermb(dst, mask, nds, src, merge, vector_len); break; 10555 case T_CHAR: 10556 case T_SHORT: 10557 evpermw(dst, mask, nds, src, merge, vector_len); break; 10558 case T_INT: 10559 case T_FLOAT: 10560 evpermd(dst, mask, nds, src, merge, vector_len); break; 10561 case T_LONG: 10562 case T_DOUBLE: 10563 evpermq(dst, mask, nds, src, merge, vector_len); break; 10564 default: 10565 fatal("Unexpected type argument %s", type2name(type)); break; 10566 } 10567 } 10568 10569 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10570 switch(type) { 10571 case T_BYTE: 10572 evpminub(dst, mask, nds, src, merge, vector_len); break; 10573 case T_SHORT: 10574 evpminuw(dst, mask, nds, src, merge, vector_len); break; 10575 case T_INT: 10576 evpminud(dst, mask, nds, src, merge, vector_len); break; 10577 case T_LONG: 10578 evpminuq(dst, mask, nds, src, merge, vector_len); break; 10579 default: 10580 fatal("Unexpected type argument %s", type2name(type)); break; 10581 } 10582 } 10583 10584 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10585 switch(type) { 10586 case T_BYTE: 10587 evpmaxub(dst, mask, nds, src, merge, vector_len); break; 10588 case T_SHORT: 10589 evpmaxuw(dst, mask, nds, src, merge, vector_len); break; 10590 case T_INT: 10591 evpmaxud(dst, mask, nds, src, merge, vector_len); break; 10592 case T_LONG: 10593 evpmaxuq(dst, mask, nds, src, merge, vector_len); break; 10594 default: 10595 fatal("Unexpected type argument %s", type2name(type)); break; 10596 } 10597 } 10598 10599 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10600 switch(type) { 10601 case T_BYTE: 10602 evpminub(dst, mask, nds, src, merge, vector_len); break; 10603 case T_SHORT: 10604 evpminuw(dst, mask, nds, src, merge, vector_len); break; 10605 case T_INT: 10606 evpminud(dst, mask, nds, src, merge, vector_len); break; 10607 case T_LONG: 10608 evpminuq(dst, mask, nds, src, merge, vector_len); break; 10609 default: 10610 fatal("Unexpected type argument %s", type2name(type)); break; 10611 } 10612 } 10613 10614 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10615 switch(type) { 10616 case T_BYTE: 10617 evpmaxub(dst, mask, nds, src, merge, vector_len); break; 10618 case T_SHORT: 10619 evpmaxuw(dst, mask, nds, src, merge, vector_len); break; 10620 case T_INT: 10621 evpmaxud(dst, mask, nds, src, merge, vector_len); break; 10622 case T_LONG: 10623 evpmaxuq(dst, mask, nds, src, merge, vector_len); break; 10624 default: 10625 fatal("Unexpected type argument %s", type2name(type)); break; 10626 } 10627 } 10628 10629 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10630 switch(type) { 10631 case T_BYTE: 10632 evpminsb(dst, mask, nds, src, merge, vector_len); break; 10633 case T_SHORT: 10634 evpminsw(dst, mask, nds, src, merge, vector_len); break; 10635 case T_INT: 10636 
evpminsd(dst, mask, nds, src, merge, vector_len); break; 10637 case T_LONG: 10638 evpminsq(dst, mask, nds, src, merge, vector_len); break; 10639 default: 10640 fatal("Unexpected type argument %s", type2name(type)); break; 10641 } 10642 } 10643 10644 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10645 switch(type) { 10646 case T_BYTE: 10647 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 10648 case T_SHORT: 10649 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 10650 case T_INT: 10651 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 10652 case T_LONG: 10653 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 10654 default: 10655 fatal("Unexpected type argument %s", type2name(type)); break; 10656 } 10657 } 10658 10659 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10660 switch(type) { 10661 case T_BYTE: 10662 evpminsb(dst, mask, nds, src, merge, vector_len); break; 10663 case T_SHORT: 10664 evpminsw(dst, mask, nds, src, merge, vector_len); break; 10665 case T_INT: 10666 evpminsd(dst, mask, nds, src, merge, vector_len); break; 10667 case T_LONG: 10668 evpminsq(dst, mask, nds, src, merge, vector_len); break; 10669 default: 10670 fatal("Unexpected type argument %s", type2name(type)); break; 10671 } 10672 } 10673 10674 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10675 switch(type) { 10676 case T_BYTE: 10677 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 10678 case T_SHORT: 10679 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 10680 case T_INT: 10681 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 10682 case T_LONG: 10683 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 10684 default: 10685 fatal("Unexpected type argument %s", type2name(type)); break; 10686 } 10687 } 10688 10689 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10690 switch(type) { 10691 case T_INT: 10692 evpxord(dst, mask, nds, src, merge, vector_len); break; 10693 case T_LONG: 10694 evpxorq(dst, mask, nds, src, merge, vector_len); break; 10695 default: 10696 fatal("Unexpected type argument %s", type2name(type)); break; 10697 } 10698 } 10699 10700 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10701 switch(type) { 10702 case T_INT: 10703 evpxord(dst, mask, nds, src, merge, vector_len); break; 10704 case T_LONG: 10705 evpxorq(dst, mask, nds, src, merge, vector_len); break; 10706 default: 10707 fatal("Unexpected type argument %s", type2name(type)); break; 10708 } 10709 } 10710 10711 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10712 switch(type) { 10713 case T_INT: 10714 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 10715 case T_LONG: 10716 evporq(dst, mask, nds, src, merge, vector_len); break; 10717 default: 10718 fatal("Unexpected type argument %s", type2name(type)); break; 10719 } 10720 } 10721 10722 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10723 switch(type) { 10724 case T_INT: 10725 
Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 10726 case T_LONG: 10727 evporq(dst, mask, nds, src, merge, vector_len); break; 10728 default: 10729 fatal("Unexpected type argument %s", type2name(type)); break; 10730 } 10731 } 10732 10733 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10734 switch(type) { 10735 case T_INT: 10736 evpandd(dst, mask, nds, src, merge, vector_len); break; 10737 case T_LONG: 10738 evpandq(dst, mask, nds, src, merge, vector_len); break; 10739 default: 10740 fatal("Unexpected type argument %s", type2name(type)); break; 10741 } 10742 } 10743 10744 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10745 switch(type) { 10746 case T_INT: 10747 evpandd(dst, mask, nds, src, merge, vector_len); break; 10748 case T_LONG: 10749 evpandq(dst, mask, nds, src, merge, vector_len); break; 10750 default: 10751 fatal("Unexpected type argument %s", type2name(type)); break; 10752 } 10753 } 10754 10755 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) { 10756 switch(masklen) { 10757 case 8: 10758 kortestbl(src1, src2); 10759 break; 10760 case 16: 10761 kortestwl(src1, src2); 10762 break; 10763 case 32: 10764 kortestdl(src1, src2); 10765 break; 10766 case 64: 10767 kortestql(src1, src2); 10768 break; 10769 default: 10770 fatal("Unexpected mask length %d", masklen); 10771 break; 10772 } 10773 } 10774 10775 10776 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) { 10777 switch(masklen) { 10778 case 8: 10779 ktestbl(src1, src2); 10780 break; 10781 case 16: 10782 ktestwl(src1, src2); 10783 break; 10784 case 32: 10785 ktestdl(src1, src2); 10786 break; 10787 case 64: 10788 ktestql(src1, src2); 10789 break; 10790 default: 10791 fatal("Unexpected mask length %d", masklen); 10792 break; 10793 } 10794 } 10795 10796 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 10797 switch(type) { 10798 case T_INT: 10799 evprold(dst, mask, src, shift, merge, vlen_enc); break; 10800 case T_LONG: 10801 evprolq(dst, mask, src, shift, merge, vlen_enc); break; 10802 default: 10803 fatal("Unexpected type argument %s", type2name(type)); break; 10804 break; 10805 } 10806 } 10807 10808 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 10809 switch(type) { 10810 case T_INT: 10811 evprord(dst, mask, src, shift, merge, vlen_enc); break; 10812 case T_LONG: 10813 evprorq(dst, mask, src, shift, merge, vlen_enc); break; 10814 default: 10815 fatal("Unexpected type argument %s", type2name(type)); break; 10816 } 10817 } 10818 10819 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 10820 switch(type) { 10821 case T_INT: 10822 evprolvd(dst, mask, src1, src2, merge, vlen_enc); break; 10823 case T_LONG: 10824 evprolvq(dst, mask, src1, src2, merge, vlen_enc); break; 10825 default: 10826 fatal("Unexpected type argument %s", type2name(type)); break; 10827 } 10828 } 10829 10830 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 10831 switch(type) { 10832 case T_INT: 10833 evprorvd(dst, mask, src1, src2, merge, vlen_enc); break; 10834 case T_LONG: 10835 
evprorvq(dst, mask, src1, src2, merge, vlen_enc); break; 10836 default: 10837 fatal("Unexpected type argument %s", type2name(type)); break; 10838 } 10839 } 10840 10841 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 10842 assert(rscratch != noreg || always_reachable(src), "missing"); 10843 10844 if (reachable(src)) { 10845 evpandq(dst, nds, as_Address(src), vector_len); 10846 } else { 10847 lea(rscratch, src); 10848 evpandq(dst, nds, Address(rscratch, 0), vector_len); 10849 } 10850 } 10851 10852 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 10853 assert(rscratch != noreg || always_reachable(src), "missing"); 10854 10855 if (reachable(src)) { 10856 Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len); 10857 } else { 10858 lea(rscratch, src); 10859 Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 10860 } 10861 } 10862 10863 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 10864 assert(rscratch != noreg || always_reachable(src), "missing"); 10865 10866 if (reachable(src)) { 10867 evporq(dst, nds, as_Address(src), vector_len); 10868 } else { 10869 lea(rscratch, src); 10870 evporq(dst, nds, Address(rscratch, 0), vector_len); 10871 } 10872 } 10873 10874 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 10875 assert(rscratch != noreg || always_reachable(src), "missing"); 10876 10877 if (reachable(src)) { 10878 vpshufb(dst, nds, as_Address(src), vector_len); 10879 } else { 10880 lea(rscratch, src); 10881 vpshufb(dst, nds, Address(rscratch, 0), vector_len); 10882 } 10883 } 10884 10885 void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 10886 assert(rscratch != noreg || always_reachable(src), "missing"); 10887 10888 if (reachable(src)) { 10889 Assembler::vpor(dst, nds, as_Address(src), vector_len); 10890 } else { 10891 lea(rscratch, src); 10892 Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len); 10893 } 10894 } 10895 10896 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) { 10897 assert(rscratch != noreg || always_reachable(src3), "missing"); 10898 10899 if (reachable(src3)) { 10900 vpternlogq(dst, imm8, src2, as_Address(src3), vector_len); 10901 } else { 10902 lea(rscratch, src3); 10903 vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len); 10904 } 10905 } 10906 10907 #if COMPILER2_OR_JVMCI 10908 10909 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 10910 Register length, Register temp, int vec_enc) { 10911 // Computing mask for predicated vector store. 10912 movptr(temp, -1); 10913 bzhiq(temp, temp, length); 10914 kmov(mask, temp); 10915 evmovdqu(bt, mask, dst, xmm, true, vec_enc); 10916 } 10917 10918 // Set memory operation for length "less than" 64 bytes. 
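// fill_masked() above derives the predicate with BZHI: movptr(temp, -1)
// followed by bzhiq(temp, temp, length) clears every bit at position
// >= length, so the predicated store writes exactly `length` elements.
// As a model (not emitted code):
//
//   mask = (length >= 64) ? ~0ULL : ((1ULL << length) - 1);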
10919 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp, 10920 XMMRegister xmm, KRegister mask, Register length, 10921 Register temp, bool use64byteVector) { 10922 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10923 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 10924 if (!use64byteVector) { 10925 fill32(dst, disp, xmm); 10926 subptr(length, 32 >> shift); 10927 fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp); 10928 } else { 10929 assert(MaxVectorSize == 64, "vector length != 64"); 10930 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit); 10931 } 10932 } 10933 10934 10935 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp, 10936 XMMRegister xmm, KRegister mask, Register length, 10937 Register temp) { 10938 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10939 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 10940 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit); 10941 } 10942 10943 10944 void MacroAssembler::fill32(Address dst, XMMRegister xmm) { 10945 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10946 vmovdqu(dst, xmm); 10947 } 10948 10949 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) { 10950 fill32(Address(dst, disp), xmm); 10951 } 10952 10953 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) { 10954 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10955 if (!use64byteVector) { 10956 fill32(dst, xmm); 10957 fill32(dst.plus_disp(32), xmm); 10958 } else { 10959 evmovdquq(dst, xmm, Assembler::AVX_512bit); 10960 } 10961 } 10962 10963 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) { 10964 fill64(Address(dst, disp), xmm, use64byteVector); 10965 } 10966 10967 #ifdef _LP64 10968 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value, 10969 Register count, Register rtmp, XMMRegister xtmp) { 10970 Label L_exit; 10971 Label L_fill_start; 10972 Label L_fill_64_bytes; 10973 Label L_fill_96_bytes; 10974 Label L_fill_128_bytes; 10975 Label L_fill_128_bytes_loop; 10976 Label L_fill_128_loop_header; 10977 Label L_fill_128_bytes_loop_header; 10978 Label L_fill_128_bytes_loop_pre_header; 10979 Label L_fill_zmm_sequence; 10980 10981 int shift = -1; 10982 int avx3threshold = VM_Version::avx3_threshold(); 10983 switch(type) { 10984 case T_BYTE: shift = 0; 10985 break; 10986 case T_SHORT: shift = 1; 10987 break; 10988 case T_INT: shift = 2; 10989 break; 10990 /* Uncomment when LONG fill stubs are supported. 
10991 case T_LONG: shift = 3; 10992 break; 10993 */ 10994 default: 10995 fatal("Unhandled type: %s\n", type2name(type)); 10996 } 10997 10998 if ((avx3threshold != 0) || (MaxVectorSize == 32)) { 10999 11000 if (MaxVectorSize == 64) { 11001 cmpq(count, avx3threshold >> shift); 11002 jcc(Assembler::greater, L_fill_zmm_sequence); 11003 } 11004 11005 evpbroadcast(type, xtmp, value, Assembler::AVX_256bit); 11006 11007 bind(L_fill_start); 11008 11009 cmpq(count, 32 >> shift); 11010 jccb(Assembler::greater, L_fill_64_bytes); 11011 fill32_masked(shift, to, 0, xtmp, k2, count, rtmp); 11012 jmp(L_exit); 11013 11014 bind(L_fill_64_bytes); 11015 cmpq(count, 64 >> shift); 11016 jccb(Assembler::greater, L_fill_96_bytes); 11017 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp); 11018 jmp(L_exit); 11019 11020 bind(L_fill_96_bytes); 11021 cmpq(count, 96 >> shift); 11022 jccb(Assembler::greater, L_fill_128_bytes); 11023 fill64(to, 0, xtmp); 11024 subq(count, 64 >> shift); 11025 fill32_masked(shift, to, 64, xtmp, k2, count, rtmp); 11026 jmp(L_exit); 11027 11028 bind(L_fill_128_bytes); 11029 cmpq(count, 128 >> shift); 11030 jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header); 11031 fill64(to, 0, xtmp); 11032 fill32(to, 64, xtmp); 11033 subq(count, 96 >> shift); 11034 fill32_masked(shift, to, 96, xtmp, k2, count, rtmp); 11035 jmp(L_exit); 11036 11037 bind(L_fill_128_bytes_loop_pre_header); 11038 { 11039 mov(rtmp, to); 11040 andq(rtmp, 31); 11041 jccb(Assembler::zero, L_fill_128_bytes_loop_header); 11042 negq(rtmp); 11043 addq(rtmp, 32); 11044 mov64(r8, -1L); 11045 bzhiq(r8, r8, rtmp); 11046 kmovql(k2, r8); 11047 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit); 11048 addq(to, rtmp); 11049 shrq(rtmp, shift); 11050 subq(count, rtmp); 11051 } 11052 11053 cmpq(count, 128 >> shift); 11054 jcc(Assembler::less, L_fill_start); 11055 11056 bind(L_fill_128_bytes_loop_header); 11057 subq(count, 128 >> shift); 11058 11059 align32(); 11060 bind(L_fill_128_bytes_loop); 11061 fill64(to, 0, xtmp); 11062 fill64(to, 64, xtmp); 11063 addq(to, 128); 11064 subq(count, 128 >> shift); 11065 jccb(Assembler::greaterEqual, L_fill_128_bytes_loop); 11066 11067 addq(count, 128 >> shift); 11068 jcc(Assembler::zero, L_exit); 11069 jmp(L_fill_start); 11070 } 11071 11072 if (MaxVectorSize == 64) { 11073 // Sequence using 64 byte ZMM register. 
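// The ZMM sequence below mirrors the 32-byte dispatch above with doubled
// block sizes: <= 64 bytes via one masked store, <= 128 and <= 192 bytes
// unrolled, then an alignment prologue and a 192-bytes-per-iteration loop.
// All counts are element counts, hence the `>> shift`; e.g. for T_INT
// (shift == 2), cmpq(count, 64 >> shift) compares against 16 elements.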
11074 Label L_fill_128_bytes_zmm; 11075 Label L_fill_192_bytes_zmm; 11076 Label L_fill_192_bytes_loop_zmm; 11077 Label L_fill_192_bytes_loop_header_zmm; 11078 Label L_fill_192_bytes_loop_pre_header_zmm; 11079 Label L_fill_start_zmm_sequence; 11080 11081 bind(L_fill_zmm_sequence); 11082 evpbroadcast(type, xtmp, value, Assembler::AVX_512bit); 11083 11084 bind(L_fill_start_zmm_sequence); 11085 cmpq(count, 64 >> shift); 11086 jccb(Assembler::greater, L_fill_128_bytes_zmm); 11087 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true); 11088 jmp(L_exit); 11089 11090 bind(L_fill_128_bytes_zmm); 11091 cmpq(count, 128 >> shift); 11092 jccb(Assembler::greater, L_fill_192_bytes_zmm); 11093 fill64(to, 0, xtmp, true); 11094 subq(count, 64 >> shift); 11095 fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true); 11096 jmp(L_exit); 11097 11098 bind(L_fill_192_bytes_zmm); 11099 cmpq(count, 192 >> shift); 11100 jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm); 11101 fill64(to, 0, xtmp, true); 11102 fill64(to, 64, xtmp, true); 11103 subq(count, 128 >> shift); 11104 fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true); 11105 jmp(L_exit); 11106 11107 bind(L_fill_192_bytes_loop_pre_header_zmm); 11108 { 11109 movq(rtmp, to); 11110 andq(rtmp, 63); 11111 jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm); 11112 negq(rtmp); 11113 addq(rtmp, 64); 11114 mov64(r8, -1L); 11115 bzhiq(r8, r8, rtmp); 11116 kmovql(k2, r8); 11117 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit); 11118 addq(to, rtmp); 11119 shrq(rtmp, shift); 11120 subq(count, rtmp); 11121 } 11122 11123 cmpq(count, 192 >> shift); 11124 jcc(Assembler::less, L_fill_start_zmm_sequence); 11125 11126 bind(L_fill_192_bytes_loop_header_zmm); 11127 subq(count, 192 >> shift); 11128 11129 align32(); 11130 bind(L_fill_192_bytes_loop_zmm); 11131 fill64(to, 0, xtmp, true); 11132 fill64(to, 64, xtmp, true); 11133 fill64(to, 128, xtmp, true); 11134 addq(to, 192); 11135 subq(count, 192 >> shift); 11136 jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm); 11137 11138 addq(count, 192 >> shift); 11139 jcc(Assembler::zero, L_exit); 11140 jmp(L_fill_start_zmm_sequence); 11141 } 11142 bind(L_exit); 11143 } 11144 #endif 11145 #endif //COMPILER2_OR_JVMCI 11146 11147 11148 #ifdef _LP64 11149 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) { 11150 Label done; 11151 cvttss2sil(dst, src); 11152 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 11153 cmpl(dst, 0x80000000); // float_sign_flip 11154 jccb(Assembler::notEqual, done); 11155 subptr(rsp, 8); 11156 movflt(Address(rsp, 0), src); 11157 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup()))); 11158 pop(dst); 11159 bind(done); 11160 } 11161 11162 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) { 11163 Label done; 11164 cvttsd2sil(dst, src); 11165 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 11166 cmpl(dst, 0x80000000); // float_sign_flip 11167 jccb(Assembler::notEqual, done); 11168 subptr(rsp, 8); 11169 movdbl(Address(rsp, 0), src); 11170 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup()))); 11171 pop(dst); 11172 bind(done); 11173 } 11174 11175 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) { 11176 Label done; 11177 cvttss2siq(dst, src); 11178 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); 11179 jccb(Assembler::notEqual, done); 11180 subptr(rsp, 8); 11181 
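// The operand is passed to the fixup stub in the stack slot reserved above;
// as the pop into dst below implies, the stub leaves its result in that
// same slot (the convention also used by convert_f2i and convert_d2i above).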
movflt(Address(rsp, 0), src);
11182 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup())));
11183 pop(dst);
11184 bind(done);
11185 }
11186
11187 void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) {
11188 // The following code is a line-by-line assembly translation of the rounding algorithm.
11189 // Please refer to the java.lang.Math.round(float) algorithm for details.
11190 const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000;
11191 const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24;
11192 const int32_t FloatConsts_EXP_BIAS = 127;
11193 const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF;
11194 const int32_t MINUS_32 = 0xFFFFFFE0;
11195 Label L_special_case, L_block1, L_exit;
11196 movl(rtmp, FloatConsts_EXP_BIT_MASK);
11197 movdl(dst, src);
11198 andl(dst, rtmp);
11199 sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1);
11200 movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS);
11201 subl(rtmp, dst);
11202 movl(rcx, rtmp);
11203 movl(dst, MINUS_32);
11204 testl(rtmp, dst);
11205 jccb(Assembler::notEqual, L_special_case);
11206 movdl(dst, src);
11207 andl(dst, FloatConsts_SIGNIF_BIT_MASK);
11208 orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1);
11209 movdl(rtmp, src);
11210 testl(rtmp, rtmp);
11211 jccb(Assembler::greaterEqual, L_block1);
11212 negl(dst);
11213 bind(L_block1);
11214 sarl(dst);
11215 addl(dst, 0x1);
11216 sarl(dst, 0x1);
11217 jmp(L_exit);
11218 bind(L_special_case);
11219 convert_f2i(dst, src);
11220 bind(L_exit);
11221 }
11222
11223 void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) {
11224 // The following code is a line-by-line assembly translation of the rounding algorithm.
11225 // Please refer to the java.lang.Math.round(double) algorithm for details.
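// In outline (matching round_float above): extract the biased exponent; if
// the required shift fits the fast path, rebuild the significand with the
// implicit bit set, negate it for negative inputs, then shift right, add 1,
// and shift once more to round half-up. Otherwise fall through to a plain
// d2l conversion. For example, Math.round(2.5) == 3 and
// Math.round(-2.5) == -2.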
11226 const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L;
11227 const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53;
11228 const int64_t DoubleConsts_EXP_BIAS = 1023;
11229 const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL;
11230 const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L;
11231 Label L_special_case, L_block1, L_exit;
11232 mov64(rtmp, DoubleConsts_EXP_BIT_MASK);
11233 movq(dst, src);
11234 andq(dst, rtmp);
11235 sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1);
11236 mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS);
11237 subq(rtmp, dst);
11238 movq(rcx, rtmp);
11239 mov64(dst, MINUS_64);
11240 testq(rtmp, dst);
11241 jccb(Assembler::notEqual, L_special_case);
11242 movq(dst, src);
11243 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK);
11244 andq(dst, rtmp);
11245 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1);
11246 orq(dst, rtmp);
11247 movq(rtmp, src);
11248 testq(rtmp, rtmp);
11249 jccb(Assembler::greaterEqual, L_block1);
11250 negq(dst);
11251 bind(L_block1);
11252 sarq(dst);
11253 addq(dst, 0x1);
11254 sarq(dst, 0x1);
11255 jmp(L_exit);
11256 bind(L_special_case);
11257 convert_d2l(dst, src);
11258 bind(L_exit);
11259 }
11260
11261 void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
11262 Label done;
11263 cvttsd2siq(dst, src);
11264 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
11265 jccb(Assembler::notEqual, done);
11266 subptr(rsp, 8);
11267 movdbl(Address(rsp, 0), src);
11268 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
11269 pop(dst);
11270 bind(done);
11271 }
11272
11273 void MacroAssembler::cache_wb(Address line)
11274 {
11275 // 64-bit CPUs always support clflush
11276 assert(VM_Version::supports_clflush(), "clflush should be available");
11277 bool optimized = VM_Version::supports_clflushopt();
11278 bool no_evict = VM_Version::supports_clwb();
11279
11280 // prefer clwb (writeback without evict) otherwise
11281 // prefer clflushopt (potentially parallel writeback with evict)
11282 // otherwise fall back on clflush (serial writeback with evict)
11283
11284 if (optimized) {
11285 if (no_evict) {
11286 clwb(line);
11287 } else {
11288 clflushopt(line);
11289 }
11290 } else {
11291 // no need for fence when using CLFLUSH
11292 clflush(line);
11293 }
11294 }
11295
11296 void MacroAssembler::cache_wbsync(bool is_pre)
11297 {
11298 assert(VM_Version::supports_clflush(), "clflush should be available");
11299 bool optimized = VM_Version::supports_clflushopt();
11300 bool no_evict = VM_Version::supports_clwb();
11301
11302 // pick the correct implementation
11303
11304 if (!is_pre && (optimized || no_evict)) {
11305 // need an sfence for post flush when using clflushopt or clwb
11306 // otherwise no need for any synchronization
11307
11308 sfence();
11309 }
11310 }
11311
11312 #endif // _LP64
11313
11314 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
11315 switch (cond) {
11316 // Note some conditions are synonyms for others
11317 case Assembler::zero: return Assembler::notZero;
11318 case Assembler::notZero: return Assembler::zero;
11319 case Assembler::less: return Assembler::greaterEqual;
11320 case Assembler::lessEqual: return Assembler::greater;
11321 case Assembler::greater: return Assembler::lessEqual;
11322 case Assembler::greaterEqual: return Assembler::less;
11323 case Assembler::below: return Assembler::aboveEqual;
11324 case Assembler::belowEqual: return Assembler::above;
11325 case Assembler::above: return
Assembler::belowEqual; 11326 case Assembler::aboveEqual: return Assembler::below; 11327 case Assembler::overflow: return Assembler::noOverflow; 11328 case Assembler::noOverflow: return Assembler::overflow; 11329 case Assembler::negative: return Assembler::positive; 11330 case Assembler::positive: return Assembler::negative; 11331 case Assembler::parity: return Assembler::noParity; 11332 case Assembler::noParity: return Assembler::parity; 11333 } 11334 ShouldNotReachHere(); return Assembler::overflow; 11335 } 11336 11337 // 32-bit Windows has its own fast-path implementation 11338 // of get_thread 11339 #if !defined(WIN32) || defined(_LP64) 11340 11341 // This is simply a call to Thread::current() 11342 void MacroAssembler::get_thread(Register thread) { 11343 if (thread != rax) { 11344 push(rax); 11345 } 11346 LP64_ONLY(push(rdi);) 11347 LP64_ONLY(push(rsi);) 11348 push(rdx); 11349 push(rcx); 11350 #ifdef _LP64 11351 push(r8); 11352 push(r9); 11353 push(r10); 11354 push(r11); 11355 #endif 11356 11357 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0); 11358 11359 #ifdef _LP64 11360 pop(r11); 11361 pop(r10); 11362 pop(r9); 11363 pop(r8); 11364 #endif 11365 pop(rcx); 11366 pop(rdx); 11367 LP64_ONLY(pop(rsi);) 11368 LP64_ONLY(pop(rdi);) 11369 if (thread != rax) { 11370 mov(thread, rax); 11371 pop(rax); 11372 } 11373 } 11374 11375 11376 #endif // !WIN32 || _LP64 11377 11378 void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) { 11379 Label L_stack_ok; 11380 if (bias == 0) { 11381 testptr(sp, 2 * wordSize - 1); 11382 } else { 11383 // lea(tmp, Address(rsp, bias); 11384 mov(tmp, sp); 11385 addptr(tmp, bias); 11386 testptr(tmp, 2 * wordSize - 1); 11387 } 11388 jcc(Assembler::equal, L_stack_ok); 11389 block_comment(msg); 11390 stop(msg); 11391 bind(L_stack_ok); 11392 } 11393 11394 // Implements lightweight-locking. 11395 // 11396 // obj: the object to be locked 11397 // reg_rax: rax 11398 // thread: the thread which attempts to lock obj 11399 // tmp: a temporary register 11400 void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) { 11401 assert(reg_rax == rax, ""); 11402 assert_different_registers(basic_lock, obj, reg_rax, thread, tmp); 11403 11404 Label push; 11405 const Register top = tmp; 11406 11407 // Preload the markWord. It is important that this is the first 11408 // instruction emitted as it is part of C1's null check semantics. 11409 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes())); 11410 11411 if (UseObjectMonitorTable) { 11412 // Clear cache in case fast locking succeeds. 11413 movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))), 0); 11414 } 11415 11416 // Load top. 11417 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 11418 11419 // Check if the lock-stack is full. 11420 cmpl(top, LockStack::end_offset()); 11421 jcc(Assembler::greaterEqual, slow); 11422 11423 // Check for recursion. 11424 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize)); 11425 jcc(Assembler::equal, push); 11426 11427 // Check header for monitor (0b10). 11428 testptr(reg_rax, markWord::monitor_value); 11429 jcc(Assembler::notZero, slow); 11430 11431 // Try to lock. 
Transition lock bits 0b01 => 0b00 11432 movptr(tmp, reg_rax); 11433 andptr(tmp, ~(int32_t)markWord::unlocked_value); 11434 orptr(reg_rax, markWord::unlocked_value); 11435 if (EnableValhalla) { 11436 // Mask inline_type bit such that we go to the slow path if object is an inline type 11437 andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place)); 11438 } 11439 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes())); 11440 jcc(Assembler::notEqual, slow); 11441 11442 // Restore top, CAS clobbers register. 11443 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 11444 11445 bind(push); 11446 // After successful lock, push object on lock-stack. 11447 movptr(Address(thread, top), obj); 11448 incrementl(top, oopSize); 11449 movl(Address(thread, JavaThread::lock_stack_top_offset()), top); 11450 } 11451 11452 // Implements lightweight-unlocking. 11453 // 11454 // obj: the object to be unlocked 11455 // reg_rax: rax 11456 // thread: the thread 11457 // tmp: a temporary register 11458 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) { 11459 assert(reg_rax == rax, ""); 11460 assert_different_registers(obj, reg_rax, thread, tmp); 11461 11462 Label unlocked, push_and_slow; 11463 const Register top = tmp; 11464 11465 // Check if obj is top of lock-stack. 11466 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 11467 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize)); 11468 jcc(Assembler::notEqual, slow); 11469 11470 // Pop lock-stack. 11471 DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);) 11472 subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize); 11473 11474 // Check if recursive. 11475 cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize)); 11476 jcc(Assembler::equal, unlocked); 11477 11478 // Not recursive. Check header for monitor (0b10). 11479 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes())); 11480 testptr(reg_rax, markWord::monitor_value); 11481 jcc(Assembler::notZero, push_and_slow); 11482 11483 #ifdef ASSERT 11484 // Check header not unlocked (0b01). 11485 Label not_unlocked; 11486 testptr(reg_rax, markWord::unlocked_value); 11487 jcc(Assembler::zero, not_unlocked); 11488 stop("lightweight_unlock already unlocked"); 11489 bind(not_unlocked); 11490 #endif 11491 11492 // Try to unlock. Transition lock bits 0b00 => 0b01 11493 movptr(tmp, reg_rax); 11494 orptr(tmp, markWord::unlocked_value); 11495 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes())); 11496 jcc(Assembler::equal, unlocked); 11497 11498 bind(push_and_slow); 11499 // Restore lock-stack and handle the unlock in runtime. 11500 #ifdef ASSERT 11501 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 11502 movptr(Address(thread, top), obj); 11503 #endif 11504 addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize); 11505 jmp(slow); 11506 11507 bind(unlocked); 11508 } 11509 11510 #ifdef _LP64 11511 // Saves legacy GPRs state on stack. 
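// Layout note: the 16-slot frame follows the classic PUSHA ordering
// (rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi) extended with r8-r15, so the
// slot at 11 * wordSize, where rsp would sit, is left unused.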
11512 void MacroAssembler::save_legacy_gprs() {
11513 subq(rsp, 16 * wordSize);
11514 movq(Address(rsp, 15 * wordSize), rax);
11515 movq(Address(rsp, 14 * wordSize), rcx);
11516 movq(Address(rsp, 13 * wordSize), rdx);
11517 movq(Address(rsp, 12 * wordSize), rbx);
11518 movq(Address(rsp, 10 * wordSize), rbp);
11519 movq(Address(rsp, 9 * wordSize), rsi);
11520 movq(Address(rsp, 8 * wordSize), rdi);
11521 movq(Address(rsp, 7 * wordSize), r8);
11522 movq(Address(rsp, 6 * wordSize), r9);
11523 movq(Address(rsp, 5 * wordSize), r10);
11524 movq(Address(rsp, 4 * wordSize), r11);
11525 movq(Address(rsp, 3 * wordSize), r12);
11526 movq(Address(rsp, 2 * wordSize), r13);
11527 movq(Address(rsp, wordSize), r14);
11528 movq(Address(rsp, 0), r15);
11529 }
11530
11531 // Restores legacy GPRs state from stack.
11532 void MacroAssembler::restore_legacy_gprs() {
11533 movq(r15, Address(rsp, 0));
11534 movq(r14, Address(rsp, wordSize));
11535 movq(r13, Address(rsp, 2 * wordSize));
11536 movq(r12, Address(rsp, 3 * wordSize));
11537 movq(r11, Address(rsp, 4 * wordSize));
11538 movq(r10, Address(rsp, 5 * wordSize));
11539 movq(r9, Address(rsp, 6 * wordSize));
11540 movq(r8, Address(rsp, 7 * wordSize));
11541 movq(rdi, Address(rsp, 8 * wordSize));
11542 movq(rsi, Address(rsp, 9 * wordSize));
11543 movq(rbp, Address(rsp, 10 * wordSize));
11544 movq(rbx, Address(rsp, 12 * wordSize));
11545 movq(rdx, Address(rsp, 13 * wordSize));
11546 movq(rcx, Address(rsp, 14 * wordSize));
11547 movq(rax, Address(rsp, 15 * wordSize));
11548 addq(rsp, 16 * wordSize);
11549 }
11550
11551 void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) {
11552 if (VM_Version::supports_apx_f()) {
11553 esetzucc(comparison, dst);
11554 } else {
11555 setb(comparison, dst);
11556 movzbl(dst, dst);
11557 }
11558 }
11559 #endif
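// setcc() usage sketch (illustrative, not code emitted elsewhere here):
//
//   cmpl(lhs, rhs);
//   setcc(Assembler::less, rax);   // rax = (lhs < rhs) ? 1 : 0
//
// With APX, ESETZUCC zero-extends the flag result in a single instruction;
// otherwise SETcc writes only the low byte, hence the setb/movzbl pair above.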