/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "ci/ciInlineKlass.hpp"
#include "crc32c.h"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature_cc.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER2
#include "opto/output.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif
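
// Maps each condition code to its logical negation; the array is indexed by
// the Assembler::Condition encoding noted in the comment on each entry.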
static const Assembler::Condition reverse[] = {
    Assembler::noOverflow     /* overflow      = 0x0 */ ,
    Assembler::overflow       /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
    Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
    Assembler::notZero        /* zero          = 0x4, equal         = 0x4 */ ,
    Assembler::zero           /* notZero      = 0x5, notEqual       = 0x5 */ ,
    Assembler::above          /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual     /* above         = 0x7 */ ,
    Assembler::positive       /* negative      = 0x8 */ ,
    Assembler::negative       /* positive      = 0x9 */ ,
    Assembler::noParity       /* parity        = 0xa */ ,
    Assembler::parity         /* noParity      = 0xb */ ,
    Assembler::greaterEqual   /* less          = 0xc */ ,
    Assembler::less           /* greaterEqual  = 0xd */ ,
    Assembler::greater        /* lessEqual     = 0xe */ ,
    Assembler::lessEqual      /* greater       = 0xf, */
};


// Implementation of MacroAssembler

// First all the versions that have distinct versions depending on 32/64 bit
// Unless the difference is trivial (1 line or so).

#ifndef _LP64

// 32bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}

Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  assert(rscratch == noreg, "");
  return Address::make_array(adr);
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments) {
  call(RuntimeAddress(entry_point));
  increment(rsp, number_of_arguments * wordSize);
}

void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Address src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Register src1, jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::extend_sign(Register hi, Register lo) {
  // According to Intel Doc. AP-526, "Integer Divide", p.18.
  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
    cdql();
  } else {
    movl(hi, lo);
    sarl(hi, 31);
  }
}

void MacroAssembler::jC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::parity, L);
}

void MacroAssembler::jnC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::noParity, L);
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  assert(rscratch == noreg, "not needed");
  jmp(as_Address(entry, noreg));
}

// Note: y_lo will be destroyed
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  // Long compare for Java (semantics as described in JVM spec.)
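  // Result convention (inferred from the code below): x_hi is set to 1 if
  // x > y, 0 if x == y, and -1 if x < y, matching the lcmp bytecode.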
  Label high, low, done;

  cmpl(x_hi, y_hi);
  jcc(Assembler::less, low);
  jcc(Assembler::greater, high);
  // x_hi is the return register
  xorl(x_hi, x_hi);
  cmpl(x_lo, y_lo);
  jcc(Assembler::below, low);
  jcc(Assembler::equal, done);

  bind(high);
  xorl(x_hi, x_hi);
  increment(x_hi);
  jmp(done);

  bind(low);
  xorl(x_hi, x_hi);
  decrementl(x_hi);

  bind(done);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  assert(rscratch == noreg, "not needed");

  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t)adr.target(), adr.rspec());
}

void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  //
  // rsp ---> [  ??  ] \               \
  //            ....    | y_rsp_offset  |
  //          [ y_lo ] /  (in bytes)    | x_rsp_offset
  //          [ y_hi ]                  | (in bytes)
  //            ....                    |
  //          [ x_lo ]                 /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);
  orl(rbx, rcx);                                 // rbx = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);                   // if rbx = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);                                    // x_hi * y_lo
  movl(rbx, rax);                                // save lo(x_hi * y_lo) in rbx
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);                                     // x_lo * y_hi
  addl(rbx, rax);                                // add lo(x_lo * y_hi) to rbx
  // 3rd step
  bind(quick);                                   // note: rbx = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);                                    // x_lo * y_lo
  addl(rdx, rbx);                                // correct hi(x_lo * y_lo)
}

void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);
  negl(hi);
}

void MacroAssembler::lshl(Register hi, Register lo) {
  // Java shift left long support (semantics as described in JVM spec., p.305)
  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  // shift value is in rcx !
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(hi, lo);                                  // x := x << n
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
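  // shldl(hi, lo) shifts hi left by the count in cl, filling the vacated low
  // bits from the high end of lo; shll(lo) then shifts lo by the same
  // implicit cl count, completing the 64-bit left shift.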
  bind(L);                                       // s (mod n) < n
  shldl(hi, lo);                                 // x := x << s
  shll(lo);
}


void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(lo, hi);                                  // x := x >> n
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shrdl(lo, hi);                                 // x := x >> s
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  movl(as_Address(dst, noreg), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src, noreg));
}

void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  movl(dst, src);
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif
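
// Entered from MacroAssembler::stop() (see below): the register arguments
// are the values saved by pusha() there, and eip is the return address
// pushed by the call trick just before the pusha.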
void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax,
                             int eip, char* msg) {
  // In order to get locks to work, we need to fake an in_VM state
  JavaThread* thread = JavaThread::current();
  JavaThreadState saved_state = thread->thread_state();
  thread->set_thread_state(_thread_in_vm);
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
  if ((WizardMode || Verbose) && PrintMiscellaneous) {
    tty->cr();
    findpc(eip);
    tty->cr();
  }
#endif
#define PRINT_REG(rax) \
  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
  PRINT_REG(rax);
  PRINT_REG(rbx);
  PRINT_REG(rcx);
  PRINT_REG(rdx);
  PRINT_REG(rdi);
  PRINT_REG(rsi);
  PRINT_REG(rbp);
  PRINT_REG(rsp);
#undef PRINT_REG
  // Print some words near top of stack.
  int* dump_sp = (int*) rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 16; row++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 8; col++) {
      tty->print(" 0x%08x", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)eip-64, (address)eip);
  tty->print_cr("--------");
  Disassembler::decode((address)eip, (address)eip+32);
}

void MacroAssembler::stop(const char* msg) {
  // push address of message
  ExternalAddress message((address)msg);
  pushptr(message.addr(), noreg);
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  // push address of message
  ExternalAddress message((address)msg);
  pushptr(message.addr(), noreg);

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize);       // discard argument
  pop_CPU_state();
}

void MacroAssembler::print_state() {
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers

  push_CPU_state();
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
  pop_CPU_state();

  popa();
  addl(rsp, wordSize);
}

#else // _LP64

// 64 bit versions
rval"); 497 assert(reachable(adr), "must be"); 498 return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc()); 499 500 } 501 502 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) { 503 AddressLiteral base = adr.base(); 504 lea(rscratch, base); 505 Address index = adr.index(); 506 assert(index._disp == 0, "must not have disp"); // maybe it can? 507 Address array(rscratch, index._index, index._scale, index._disp); 508 return array; 509 } 510 511 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) { 512 Label L, E; 513 514 #ifdef _WIN64 515 // Windows always allocates space for it's register args 516 assert(num_args <= 4, "only register arguments supported"); 517 subq(rsp, frame::arg_reg_save_area_bytes); 518 #endif 519 520 // Align stack if necessary 521 testl(rsp, 15); 522 jcc(Assembler::zero, L); 523 524 subq(rsp, 8); 525 call(RuntimeAddress(entry_point)); 526 addq(rsp, 8); 527 jmp(E); 528 529 bind(L); 530 call(RuntimeAddress(entry_point)); 531 532 bind(E); 533 534 #ifdef _WIN64 535 // restore stack pointer 536 addq(rsp, frame::arg_reg_save_area_bytes); 537 #endif 538 539 } 540 541 void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) { 542 assert(!src2.is_lval(), "should use cmpptr"); 543 assert(rscratch != noreg || always_reachable(src2), "missing"); 544 545 if (reachable(src2)) { 546 cmpq(src1, as_Address(src2)); 547 } else { 548 lea(rscratch, src2); 549 Assembler::cmpq(src1, Address(rscratch, 0)); 550 } 551 } 552 553 int MacroAssembler::corrected_idivq(Register reg) { 554 // Full implementation of Java ldiv and lrem; checks for special 555 // case as described in JVM spec., p.243 & p.271. The function 556 // returns the (pc) offset of the idivl instruction - may be needed 557 // for implicit exceptions. 
int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271. The function
  // returns the (pc) offset of the idivq instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor (may not be eax/edx)     -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}

void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}

void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}

void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    incrementq(as_Address(dst));
  } else {
    lea(rscratch, dst);
    incrementq(Address(rscratch, 0));
  }
}

void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}

void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  lea(rscratch, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch;
  jmp(dispatch);
}

void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  lea(rscratch, adr);
  movptr(dst, rscratch);
}
void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_int8((unsigned char)0xC9); // LEAVE
}

void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(dst, src);
      movq(dst, Address(dst, 0));
    }
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  movq(as_Address(dst, rscratch), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src, dst /*rscratch*/));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  if (is_simm32(src)) {
    movptr(dst, checked_cast<int32_t>(src));
  } else {
    mov64(rscratch, src);
    movq(dst, rscratch);
  }
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  movoop(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  mov_metadata(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  lea(rscratch, src);
  if (src.is_lval()) {
    push(rscratch);
  } else {
    pushq(Address(rscratch, 0));
  }
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  reset_last_Java_frame(r15_thread, clear_fp);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register rscratch) {
  set_last_Java_frame(r15_thread, last_java_sp, last_java_fp, last_java_pc, rscratch);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::stop(const char* msg) {
  if (ShowMessageBoxOnError) {
    address rip = pc();
    pusha(); // get regs on stack
    lea(c_rarg1, InternalAddress(rip));
    movq(c_rarg2, rsp); // pass pointer to regs array
  }
  lea(c_rarg0, ExternalAddress((address) msg));
  andq(rsp, -16); // align stack as required by ABI
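  // debug64 receives: c_rarg0 = message, c_rarg1 = faulting rip,
  // c_rarg2 = pointer to the pusha() register block (the latter two are
  // only set up when ShowMessageBoxOnError is on).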
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, ExternalAddress((address) msg));
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();            // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  PRINT_REG(rsp, (intptr_t)(&regs[16]));
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
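  // pusha() pushes rax first and r15 last; since the stack grows down, rax
  // ends up at regs[15] and r15 at regs[0] (inferred from the indices above).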
  // Print some words near the top of the stack.
  int64_t* rsp = &regs[16];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg()) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
             src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}
// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg()) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if (src.first() != dst.first()) {
        movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}


// A float arg may have to do float reg int reg conversion
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if (src.first() != dst.first()) {
      movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}
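
// Note on float_move above: the reg-to-reg case copies with movdbl; copying
// all 64 bits of the source XMM register is harmless for a 32-bit float arg.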
// On 64 bit we will store integer-like items to the stack as
// 64-bit items (x86_32/64 abi) even though java would only store
// 32 bits for a parameter. On 32 bit it will simply be 32 bits.
// So this routine will do 32->32 on 32bit and 32->64 on 64bit.
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movq(rax, Address(rbp, reg2offset_in(src.first())));
      movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
void MacroAssembler::object_move(OopMap* map,
                                 int oop_handle_offset,
                                 int framesize_in_slots,
                                 VMRegPair src,
                                 VMRegPair dst,
                                 bool is_receiver,
                                 int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if oop is null if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
    lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a null
    cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmpptr(rOop, NULL_WORD);
    lea(rHandle, Address(rsp, offset));
    // conditionally move a null from the handle area where it was just stored
    cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it otherwise it is already in correct reg.
  if (dst.first()->is_stack()) {
    movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}

#endif // _LP64

// Now versions that are common to 32/64 bit
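
// Many of the helpers below take an AddressLiteral plus a scratch register:
// if the literal is reachable (rip-relative addressable) it is used
// directly, otherwise its address is first materialized into rscratch. The
// asserts require a scratch register whenever the literal is not always
// reachable.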
void MacroAssembler::addptr(Register dst, int32_t imm32) {
  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
}

void MacroAssembler::addptr(Register dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addptr(Address dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    addss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    addss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addpd(dst, Address(rscratch, 0));
  }
}

// See 8273459. Function for ensuring 64-byte alignment, intended for stubs only.
// Stub code is generated once and never copied.
// NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
void MacroAssembler::align64() {
  align(64, (unsigned long long) pc());
}

void MacroAssembler::align32() {
  align(32, (unsigned long long) pc());
}

void MacroAssembler::align(int modulus) {
  // 8273459: Ensure alignment is possible with current segment alignment
  assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
  align(modulus, offset());
}

void MacroAssembler::align(int modulus, int target) {
  if (target % modulus != 0) {
    nop(modulus - (target % modulus));
  }
}

void MacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}

void MacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void MacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}

void MacroAssembler::pop_d(XMMRegister r) {
  movdbl(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}
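
// The SSE forms of andps/andpd below fault on a memory operand that is not
// 16-byte aligned (the VEX-encoded AVX forms do not), hence the alignment
// asserts guarding the non-AVX path.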
void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andpd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andps(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
}

#ifdef _LP64
void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    andq(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    andq(dst, Address(rscratch, 0));
  }
}
#endif

void MacroAssembler::atomic_incl(Address counter_addr) {
  lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incl(Address(rscratch, 0));
  }
}

#ifdef _LP64
void MacroAssembler::atomic_incq(Address counter_addr) {
  lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incq(Address(rscratch, 0));
  }
}
#endif

// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-(int)os::vm_page_size())), size);
  subptr(tmp, (int)os::vm_page_size());
  subl(size, (int)os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down including all pages in the shadow zone.
  for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
    // This could be any sized move, but since it can serve as a debugging
    // crumb, the bigger the better.
    movptr(Address(tmp, (-i*(int)os::vm_page_size())), size);
  }
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(get_thread(rsi);)

  cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
  jcc(Assembler::below, no_reserved_zone_enabling);

  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
  jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);
  setb(Assembler::notZero, x);
}

// Wouldn't need if AddressLiteral version had new name
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}

void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}

void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
  assert(rscratch != noreg || always_reachable(entry), "missing");

  if (reachable(entry)) {
    Assembler::call_literal(entry.target(), entry.rspec());
  } else {
    lea(rscratch, entry);
    Assembler::call(rscratch);
  }
}

void MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
#ifdef _LP64
  // Needs full 64-bit immediate for later patching.
  mov64(rax, (int64_t)Universe::non_oop_word());
#else
  movptr(rax, (intptr_t)Universe::non_oop_word());
#endif
  call(AddressLiteral(entry, rh));
}

int MacroAssembler::ic_check_size() {
  return LP64_ONLY(14) NOT_LP64(12);
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
  Register data = rax;
  Register temp = LP64_ONLY(rscratch1) NOT_LP64(rbx);

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompressedClassPointers) {
    movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  } else {
    movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
  }

  // if inline cache check fails, then jump to runtime routine
  jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}
void MacroAssembler::emit_static_call_stub() {
  // Static stub relocation also tags the Method* in the code-stream.
  mov_metadata(rbx, (Metadata*) nullptr);  // Method is zapped till fixup time.
  // This is recognized as unresolved by relocs/nativeinst/ic code.
  jump(RuntimeAddress(pc()));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));

  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}
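
// The super_call_VM variants call MacroAssembler::call_VM_base through a
// qualified name, bypassing any override of call_VM_base in a subclass
// (note the explicit MacroAssembler:: qualification below).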
void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   int number_of_arguments,
                                   bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   bool check_exceptions) {
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   bool check_exceptions) {

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   Register arg_3,
                                   bool check_exceptions) {
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifdef _LP64
    java_thread = r15_thread;
#else
    java_thread = rdi;
    get_thread(java_thread);
#endif // LP64
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // r12 is the heapbase.
  LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  NOT_LP64(push(java_thread); number_of_arguments++);
  LP64_ONLY(mov(c_rarg0, r15_thread));

  // set last Java frame before call
  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(java_thread, last_java_sp, rbp, nullptr, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

  // restore the thread (cannot use the pushed argument since arguments
  // may be overwritten by C code generated by an optimizing compiler);
  // however can use the register value directly if it is callee saved.
  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
    // rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
    guarantee(java_thread != rax, "change this code");
    push(rax);
    { Label L;
      get_thread(rax);
      cmpptr(java_thread, rax);
      jcc(Assembler::equal, L);
      STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
      bind(L);
    }
    pop(rax);
#endif
  } else {
    get_thread(java_thread);
  }
  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(java_thread, true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
#ifndef _LP64
    jump_cc(Assembler::notEqual,
            RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
#endif // LP64
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {

  // Calculating the value for last_Java_sp is somewhat subtle. call_VM does
  // an intermediate call which places a return address on the stack just
  // under the stack pointer as the user finished with it. This allows us
  // to retrieve last_Java_pc from last_Java_sp[-1].
  // On 32bit we then have to push additional args on the stack to accomplish
  // the actual requested call. On 64bit call_VM only can use register args
  // so the only extra space is the return address that call_VM created.
  // This hopefully explains the calculations here.

#ifdef _LP64
  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));
#else
  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
#endif // LP64

  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
}

// Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter.
1687 void MacroAssembler::call_VM_leaf0(address entry_point) {
1688   MacroAssembler::call_VM_leaf_base(entry_point, 0);
1689 }
1690 
1691 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
1692   call_VM_leaf_base(entry_point, number_of_arguments);
1693 }
1694 
1695 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1696   pass_arg0(this, arg_0);
1697   call_VM_leaf(entry_point, 1);
1698 }
1699 
1700 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1701 
1702   LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
1703   pass_arg1(this, arg_1);
1704   pass_arg0(this, arg_0);
1705   call_VM_leaf(entry_point, 2);
1706 }
1707 
1708 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1709   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
1710   LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
1711   pass_arg2(this, arg_2);
1712   pass_arg1(this, arg_1);
1713   pass_arg0(this, arg_0);
1714   call_VM_leaf(entry_point, 3);
1715 }
1716 
1717 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
1718   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
1719   LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
1720   LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
1721   pass_arg3(this, arg_3);
1722   pass_arg2(this, arg_2);
1723   pass_arg1(this, arg_1);
1724   pass_arg0(this, arg_0);
1725   call_VM_leaf(entry_point, 4);
1726 }
1727 
1728 void MacroAssembler::super_call_VM_leaf(address entry_point) {
1729   MacroAssembler::call_VM_leaf_base(entry_point, 0);
1730 }
1731 
1732 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1733   pass_arg0(this, arg_0);
1734   MacroAssembler::call_VM_leaf_base(entry_point, 1);
1735 }
1736 
1737 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1738   LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
1739   pass_arg1(this, arg_1);
1740   pass_arg0(this, arg_0);
1741   MacroAssembler::call_VM_leaf_base(entry_point, 2);
1742 }
1743 
1744 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1745   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
1746   LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
1747   pass_arg2(this, arg_2);
1748   pass_arg1(this, arg_1);
1749   pass_arg0(this, arg_0);
1750   MacroAssembler::call_VM_leaf_base(entry_point, 3);
1751 }
1752 
1753 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
1754   LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
1755   LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
1756   LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
1757   pass_arg3(this, arg_3);
1758   pass_arg2(this, arg_2);
1759   pass_arg1(this, arg_1);
1760   pass_arg0(this, arg_0);
1761   MacroAssembler::call_VM_leaf_base(entry_point, 4);
1762 }
1763 
1764 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
1765   movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
1766   movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
1767   verify_oop_msg(oop_result, "broken oop in call_VM_base");
1768 }
1769 
1770 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
1771   movptr(metadata_result,
Address(java_thread, JavaThread::vm_result_2_offset())); 1772 movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD); 1773 } 1774 1775 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { 1776 } 1777 1778 void MacroAssembler::check_and_handle_popframe(Register java_thread) { 1779 } 1780 1781 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) { 1782 assert(rscratch != noreg || always_reachable(src1), "missing"); 1783 1784 if (reachable(src1)) { 1785 cmpl(as_Address(src1), imm); 1786 } else { 1787 lea(rscratch, src1); 1788 cmpl(Address(rscratch, 0), imm); 1789 } 1790 } 1791 1792 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) { 1793 assert(!src2.is_lval(), "use cmpptr"); 1794 assert(rscratch != noreg || always_reachable(src2), "missing"); 1795 1796 if (reachable(src2)) { 1797 cmpl(src1, as_Address(src2)); 1798 } else { 1799 lea(rscratch, src2); 1800 cmpl(src1, Address(rscratch, 0)); 1801 } 1802 } 1803 1804 void MacroAssembler::cmp32(Register src1, int32_t imm) { 1805 Assembler::cmpl(src1, imm); 1806 } 1807 1808 void MacroAssembler::cmp32(Register src1, Address src2) { 1809 Assembler::cmpl(src1, src2); 1810 } 1811 1812 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1813 ucomisd(opr1, opr2); 1814 1815 Label L; 1816 if (unordered_is_less) { 1817 movl(dst, -1); 1818 jcc(Assembler::parity, L); 1819 jcc(Assembler::below , L); 1820 movl(dst, 0); 1821 jcc(Assembler::equal , L); 1822 increment(dst); 1823 } else { // unordered is greater 1824 movl(dst, 1); 1825 jcc(Assembler::parity, L); 1826 jcc(Assembler::above , L); 1827 movl(dst, 0); 1828 jcc(Assembler::equal , L); 1829 decrementl(dst); 1830 } 1831 bind(L); 1832 } 1833 1834 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1835 ucomiss(opr1, opr2); 1836 1837 Label L; 1838 if (unordered_is_less) { 1839 movl(dst, -1); 1840 jcc(Assembler::parity, L); 1841 jcc(Assembler::below , L); 1842 movl(dst, 0); 1843 jcc(Assembler::equal , L); 1844 increment(dst); 1845 } else { // unordered is greater 1846 movl(dst, 1); 1847 jcc(Assembler::parity, L); 1848 jcc(Assembler::above , L); 1849 movl(dst, 0); 1850 jcc(Assembler::equal , L); 1851 decrementl(dst); 1852 } 1853 bind(L); 1854 } 1855 1856 1857 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) { 1858 assert(rscratch != noreg || always_reachable(src1), "missing"); 1859 1860 if (reachable(src1)) { 1861 cmpb(as_Address(src1), imm); 1862 } else { 1863 lea(rscratch, src1); 1864 cmpb(Address(rscratch, 0), imm); 1865 } 1866 } 1867 1868 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) { 1869 #ifdef _LP64 1870 assert(rscratch != noreg || always_reachable(src2), "missing"); 1871 1872 if (src2.is_lval()) { 1873 movptr(rscratch, src2); 1874 Assembler::cmpq(src1, rscratch); 1875 } else if (reachable(src2)) { 1876 cmpq(src1, as_Address(src2)); 1877 } else { 1878 lea(rscratch, src2); 1879 Assembler::cmpq(src1, Address(rscratch, 0)); 1880 } 1881 #else 1882 assert(rscratch == noreg, "not needed"); 1883 if (src2.is_lval()) { 1884 cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); 1885 } else { 1886 cmpl(src1, as_Address(src2)); 1887 } 1888 #endif // _LP64 1889 } 1890 1891 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) { 1892 assert(src2.is_lval(), "not a mem-mem compare"); 1893 #ifdef _LP64 1894 // moves src2's 
literal address 1895 movptr(rscratch, src2); 1896 Assembler::cmpq(src1, rscratch); 1897 #else 1898 assert(rscratch == noreg, "not needed"); 1899 cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); 1900 #endif // _LP64 1901 } 1902 1903 void MacroAssembler::cmpoop(Register src1, Register src2) { 1904 cmpptr(src1, src2); 1905 } 1906 1907 void MacroAssembler::cmpoop(Register src1, Address src2) { 1908 cmpptr(src1, src2); 1909 } 1910 1911 #ifdef _LP64 1912 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) { 1913 movoop(rscratch, src2); 1914 cmpptr(src1, rscratch); 1915 } 1916 #endif 1917 1918 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) { 1919 assert(rscratch != noreg || always_reachable(adr), "missing"); 1920 1921 if (reachable(adr)) { 1922 lock(); 1923 cmpxchgptr(reg, as_Address(adr)); 1924 } else { 1925 lea(rscratch, adr); 1926 lock(); 1927 cmpxchgptr(reg, Address(rscratch, 0)); 1928 } 1929 } 1930 1931 void MacroAssembler::cmpxchgptr(Register reg, Address adr) { 1932 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr)); 1933 } 1934 1935 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1936 assert(rscratch != noreg || always_reachable(src), "missing"); 1937 1938 if (reachable(src)) { 1939 Assembler::comisd(dst, as_Address(src)); 1940 } else { 1941 lea(rscratch, src); 1942 Assembler::comisd(dst, Address(rscratch, 0)); 1943 } 1944 } 1945 1946 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 1947 assert(rscratch != noreg || always_reachable(src), "missing"); 1948 1949 if (reachable(src)) { 1950 Assembler::comiss(dst, as_Address(src)); 1951 } else { 1952 lea(rscratch, src); 1953 Assembler::comiss(dst, Address(rscratch, 0)); 1954 } 1955 } 1956 1957 1958 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) { 1959 assert(rscratch != noreg || always_reachable(counter_addr), "missing"); 1960 1961 Condition negated_cond = negate_condition(cond); 1962 Label L; 1963 jcc(negated_cond, L); 1964 pushf(); // Preserve flags 1965 atomic_incl(counter_addr, rscratch); 1966 popf(); 1967 bind(L); 1968 } 1969 1970 int MacroAssembler::corrected_idivl(Register reg) { 1971 // Full implementation of Java idiv and irem; checks for 1972 // special case as described in JVM spec., p.243 & p.271. 1973 // The function returns the (pc) offset of the idivl 1974 // instruction - may be needed for implicit exceptions. 
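// A worked instance of the special case, as a sketch: Java requires
// Integer.MIN_VALUE / -1 == Integer.MIN_VALUE with remainder 0, but the
// hardware idiv of 0x80000000 by -1 overflows the 32-bit quotient and
// raises #DE instead, so the check below substitutes the required results
// without executing idiv.
//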
1975 // 1976 // normal case special case 1977 // 1978 // input : rax,: dividend min_int 1979 // reg: divisor (may not be rax,/rdx) -1 1980 // 1981 // output: rax,: quotient (= rax, idiv reg) min_int 1982 // rdx: remainder (= rax, irem reg) 0 1983 assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register"); 1984 const int min_int = 0x80000000; 1985 Label normal_case, special_case; 1986 1987 // check for special case 1988 cmpl(rax, min_int); 1989 jcc(Assembler::notEqual, normal_case); 1990 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0) 1991 cmpl(reg, -1); 1992 jcc(Assembler::equal, special_case); 1993 1994 // handle normal case 1995 bind(normal_case); 1996 cdql(); 1997 int idivl_offset = offset(); 1998 idivl(reg); 1999 2000 // normal and special case exit 2001 bind(special_case); 2002 2003 return idivl_offset; 2004 } 2005 2006 2007 2008 void MacroAssembler::decrementl(Register reg, int value) { 2009 if (value == min_jint) {subl(reg, value) ; return; } 2010 if (value < 0) { incrementl(reg, -value); return; } 2011 if (value == 0) { ; return; } 2012 if (value == 1 && UseIncDec) { decl(reg) ; return; } 2013 /* else */ { subl(reg, value) ; return; } 2014 } 2015 2016 void MacroAssembler::decrementl(Address dst, int value) { 2017 if (value == min_jint) {subl(dst, value) ; return; } 2018 if (value < 0) { incrementl(dst, -value); return; } 2019 if (value == 0) { ; return; } 2020 if (value == 1 && UseIncDec) { decl(dst) ; return; } 2021 /* else */ { subl(dst, value) ; return; } 2022 } 2023 2024 void MacroAssembler::division_with_shift (Register reg, int shift_value) { 2025 assert(shift_value > 0, "illegal shift value"); 2026 Label _is_positive; 2027 testl (reg, reg); 2028 jcc (Assembler::positive, _is_positive); 2029 int offset = (1 << shift_value) - 1 ; 2030 2031 if (offset == 1) { 2032 incrementl(reg); 2033 } else { 2034 addl(reg, offset); 2035 } 2036 2037 bind (_is_positive); 2038 sarl(reg, shift_value); 2039 } 2040 2041 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2042 assert(rscratch != noreg || always_reachable(src), "missing"); 2043 2044 if (reachable(src)) { 2045 Assembler::divsd(dst, as_Address(src)); 2046 } else { 2047 lea(rscratch, src); 2048 Assembler::divsd(dst, Address(rscratch, 0)); 2049 } 2050 } 2051 2052 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2053 assert(rscratch != noreg || always_reachable(src), "missing"); 2054 2055 if (reachable(src)) { 2056 Assembler::divss(dst, as_Address(src)); 2057 } else { 2058 lea(rscratch, src); 2059 Assembler::divss(dst, Address(rscratch, 0)); 2060 } 2061 } 2062 2063 void MacroAssembler::enter() { 2064 push(rbp); 2065 mov(rbp, rsp); 2066 } 2067 2068 void MacroAssembler::post_call_nop() { 2069 if (!Continuations::enabled()) { 2070 return; 2071 } 2072 InstructionMark im(this); 2073 relocate(post_call_nop_Relocation::spec()); 2074 InlineSkippedInstructionsCounter skipCounter(this); 2075 emit_int8((uint8_t)0x0f); 2076 emit_int8((uint8_t)0x1f); 2077 emit_int8((uint8_t)0x84); 2078 emit_int8((uint8_t)0x00); 2079 emit_int32(0x00); 2080 } 2081 2082 // A 5 byte nop that is safe for patching (see patch_verified_entry) 2083 void MacroAssembler::fat_nop() { 2084 if (UseAddressNop) { 2085 addr_nop_5(); 2086 } else { 2087 emit_int8((uint8_t)0x26); // es: 2088 emit_int8((uint8_t)0x2e); // cs: 2089 emit_int8((uint8_t)0x64); // fs: 2090 emit_int8((uint8_t)0x65); // gs: 2091 emit_int8((uint8_t)0x90); 2092 } 2093 } 2094 2095 #ifndef _LP64 2096 void 
MacroAssembler::fcmp(Register tmp) { 2097 fcmp(tmp, 1, true, true); 2098 } 2099 2100 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) { 2101 assert(!pop_right || pop_left, "usage error"); 2102 if (VM_Version::supports_cmov()) { 2103 assert(tmp == noreg, "unneeded temp"); 2104 if (pop_left) { 2105 fucomip(index); 2106 } else { 2107 fucomi(index); 2108 } 2109 if (pop_right) { 2110 fpop(); 2111 } 2112 } else { 2113 assert(tmp != noreg, "need temp"); 2114 if (pop_left) { 2115 if (pop_right) { 2116 fcompp(); 2117 } else { 2118 fcomp(index); 2119 } 2120 } else { 2121 fcom(index); 2122 } 2123 // convert FPU condition into eflags condition via rax, 2124 save_rax(tmp); 2125 fwait(); fnstsw_ax(); 2126 sahf(); 2127 restore_rax(tmp); 2128 } 2129 // condition codes set as follows: 2130 // 2131 // CF (corresponds to C0) if x < y 2132 // PF (corresponds to C2) if unordered 2133 // ZF (corresponds to C3) if x = y 2134 } 2135 2136 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) { 2137 fcmp2int(dst, unordered_is_less, 1, true, true); 2138 } 2139 2140 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) { 2141 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right); 2142 Label L; 2143 if (unordered_is_less) { 2144 movl(dst, -1); 2145 jcc(Assembler::parity, L); 2146 jcc(Assembler::below , L); 2147 movl(dst, 0); 2148 jcc(Assembler::equal , L); 2149 increment(dst); 2150 } else { // unordered is greater 2151 movl(dst, 1); 2152 jcc(Assembler::parity, L); 2153 jcc(Assembler::above , L); 2154 movl(dst, 0); 2155 jcc(Assembler::equal , L); 2156 decrementl(dst); 2157 } 2158 bind(L); 2159 } 2160 2161 void MacroAssembler::fld_d(AddressLiteral src) { 2162 fld_d(as_Address(src)); 2163 } 2164 2165 void MacroAssembler::fld_s(AddressLiteral src) { 2166 fld_s(as_Address(src)); 2167 } 2168 2169 void MacroAssembler::fldcw(AddressLiteral src) { 2170 fldcw(as_Address(src)); 2171 } 2172 2173 void MacroAssembler::fpop() { 2174 ffree(); 2175 fincstp(); 2176 } 2177 2178 void MacroAssembler::fremr(Register tmp) { 2179 save_rax(tmp); 2180 { Label L; 2181 bind(L); 2182 fprem(); 2183 fwait(); fnstsw_ax(); 2184 sahf(); 2185 jcc(Assembler::parity, L); 2186 } 2187 restore_rax(tmp); 2188 // Result is in ST0. 
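// (Sketch of the loop above: fprem only performs a partial reduction per
//  iteration and sets FPU flag C2 while more work remains; fnstsw/sahf map
//  C2 onto the parity flag, so jcc(parity, L) retries until ST0 holds the
//  final remainder of ST0 % ST1.)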
2189 // Note: fxch & fpop to get rid of ST1 2190 // (otherwise FPU stack could overflow eventually) 2191 fxch(1); 2192 fpop(); 2193 } 2194 2195 void MacroAssembler::empty_FPU_stack() { 2196 if (VM_Version::supports_mmx()) { 2197 emms(); 2198 } else { 2199 for (int i = 8; i-- > 0; ) ffree(i); 2200 } 2201 } 2202 #endif // !LP64 2203 2204 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2205 assert(rscratch != noreg || always_reachable(src), "missing"); 2206 if (reachable(src)) { 2207 Assembler::mulpd(dst, as_Address(src)); 2208 } else { 2209 lea(rscratch, src); 2210 Assembler::mulpd(dst, Address(rscratch, 0)); 2211 } 2212 } 2213 2214 void MacroAssembler::load_float(Address src) { 2215 #ifdef _LP64 2216 movflt(xmm0, src); 2217 #else 2218 if (UseSSE >= 1) { 2219 movflt(xmm0, src); 2220 } else { 2221 fld_s(src); 2222 } 2223 #endif // LP64 2224 } 2225 2226 void MacroAssembler::store_float(Address dst) { 2227 #ifdef _LP64 2228 movflt(dst, xmm0); 2229 #else 2230 if (UseSSE >= 1) { 2231 movflt(dst, xmm0); 2232 } else { 2233 fstp_s(dst); 2234 } 2235 #endif // LP64 2236 } 2237 2238 void MacroAssembler::load_double(Address src) { 2239 #ifdef _LP64 2240 movdbl(xmm0, src); 2241 #else 2242 if (UseSSE >= 2) { 2243 movdbl(xmm0, src); 2244 } else { 2245 fld_d(src); 2246 } 2247 #endif // LP64 2248 } 2249 2250 void MacroAssembler::store_double(Address dst) { 2251 #ifdef _LP64 2252 movdbl(dst, xmm0); 2253 #else 2254 if (UseSSE >= 2) { 2255 movdbl(dst, xmm0); 2256 } else { 2257 fstp_d(dst); 2258 } 2259 #endif // LP64 2260 } 2261 2262 // dst = c = a * b + c 2263 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 2264 Assembler::vfmadd231sd(c, a, b); 2265 if (dst != c) { 2266 movdbl(dst, c); 2267 } 2268 } 2269 2270 // dst = c = a * b + c 2271 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 2272 Assembler::vfmadd231ss(c, a, b); 2273 if (dst != c) { 2274 movflt(dst, c); 2275 } 2276 } 2277 2278 // dst = c = a * b + c 2279 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 2280 Assembler::vfmadd231pd(c, a, b, vector_len); 2281 if (dst != c) { 2282 vmovdqu(dst, c); 2283 } 2284 } 2285 2286 // dst = c = a * b + c 2287 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 2288 Assembler::vfmadd231ps(c, a, b, vector_len); 2289 if (dst != c) { 2290 vmovdqu(dst, c); 2291 } 2292 } 2293 2294 // dst = c = a * b + c 2295 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 2296 Assembler::vfmadd231pd(c, a, b, vector_len); 2297 if (dst != c) { 2298 vmovdqu(dst, c); 2299 } 2300 } 2301 2302 // dst = c = a * b + c 2303 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 2304 Assembler::vfmadd231ps(c, a, b, vector_len); 2305 if (dst != c) { 2306 vmovdqu(dst, c); 2307 } 2308 } 2309 2310 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) { 2311 assert(rscratch != noreg || always_reachable(dst), "missing"); 2312 2313 if (reachable(dst)) { 2314 incrementl(as_Address(dst)); 2315 } else { 2316 lea(rscratch, dst); 2317 incrementl(Address(rscratch, 0)); 2318 } 2319 } 2320 2321 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) { 2322 incrementl(as_Address(dst, rscratch)); 2323 } 2324 2325 void MacroAssembler::incrementl(Register reg, int value) { 2326 if (value == min_jint) 
{addl(reg, value) ; return; } 2327 if (value < 0) { decrementl(reg, -value); return; } 2328 if (value == 0) { ; return; } 2329 if (value == 1 && UseIncDec) { incl(reg) ; return; } 2330 /* else */ { addl(reg, value) ; return; } 2331 } 2332 2333 void MacroAssembler::incrementl(Address dst, int value) { 2334 if (value == min_jint) {addl(dst, value) ; return; } 2335 if (value < 0) { decrementl(dst, -value); return; } 2336 if (value == 0) { ; return; } 2337 if (value == 1 && UseIncDec) { incl(dst) ; return; } 2338 /* else */ { addl(dst, value) ; return; } 2339 } 2340 2341 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) { 2342 assert(rscratch != noreg || always_reachable(dst), "missing"); 2343 2344 if (reachable(dst)) { 2345 jmp_literal(dst.target(), dst.rspec()); 2346 } else { 2347 lea(rscratch, dst); 2348 jmp(rscratch); 2349 } 2350 } 2351 2352 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) { 2353 assert(rscratch != noreg || always_reachable(dst), "missing"); 2354 2355 if (reachable(dst)) { 2356 InstructionMark im(this); 2357 relocate(dst.reloc()); 2358 const int short_size = 2; 2359 const int long_size = 6; 2360 int offs = (intptr_t)dst.target() - ((intptr_t)pc()); 2361 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) { 2362 // 0111 tttn #8-bit disp 2363 emit_int8(0x70 | cc); 2364 emit_int8((offs - short_size) & 0xFF); 2365 } else { 2366 // 0000 1111 1000 tttn #32-bit disp 2367 emit_int8(0x0F); 2368 emit_int8((unsigned char)(0x80 | cc)); 2369 emit_int32(offs - long_size); 2370 } 2371 } else { 2372 #ifdef ASSERT 2373 warning("reversing conditional branch"); 2374 #endif /* ASSERT */ 2375 Label skip; 2376 jccb(reverse[cc], skip); 2377 lea(rscratch, dst); 2378 Assembler::jmp(rscratch); 2379 bind(skip); 2380 } 2381 } 2382 2383 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) { 2384 assert(rscratch != noreg || always_reachable(src), "missing"); 2385 2386 if (reachable(src)) { 2387 Assembler::ldmxcsr(as_Address(src)); 2388 } else { 2389 lea(rscratch, src); 2390 Assembler::ldmxcsr(Address(rscratch, 0)); 2391 } 2392 } 2393 2394 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2395 int off; 2396 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 2397 off = offset(); 2398 movsbl(dst, src); // movsxb 2399 } else { 2400 off = load_unsigned_byte(dst, src); 2401 shll(dst, 24); 2402 sarl(dst, 24); 2403 } 2404 return off; 2405 } 2406 2407 // Note: load_signed_short used to be called load_signed_word. 2408 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler 2409 // manual, which means 16 bits, that usage is found nowhere in HotSpot code. 2410 // The term "word" in HotSpot means a 32- or 64-bit machine word. 2411 int MacroAssembler::load_signed_short(Register dst, Address src) { 2412 int off; 2413 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 2414 // This is dubious to me since it seems safe to do a signed 16 => 64 bit 2415 // version but this is what 64bit has always done. This seems to imply 2416 // that users are only using 32bits worth. 2417 off = offset(); 2418 movswl(dst, src); // movsxw 2419 } else { 2420 off = load_unsigned_short(dst, src); 2421 shll(dst, 16); 2422 sarl(dst, 16); 2423 } 2424 return off; 2425 } 2426 2427 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2428 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 2429 // and "3.9 Partial Register Penalties", p. 22). 
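// (Illustrative note: on P6 and later, writing only the low byte of dst and
//  then reading the full register would incur a partial-register stall, so
//  movzbl is used there; on older cores movzx itself was the slow option,
//  making the xorl-then-byte-load idiom below the faster choice.)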
2430 int off; 2431 if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) { 2432 off = offset(); 2433 movzbl(dst, src); // movzxb 2434 } else { 2435 xorl(dst, dst); 2436 off = offset(); 2437 movb(dst, src); 2438 } 2439 return off; 2440 } 2441 2442 // Note: load_unsigned_short used to be called load_unsigned_word. 2443 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2444 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 2445 // and "3.9 Partial Register Penalties", p. 22). 2446 int off; 2447 if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) { 2448 off = offset(); 2449 movzwl(dst, src); // movzxw 2450 } else { 2451 xorl(dst, dst); 2452 off = offset(); 2453 movw(dst, src); 2454 } 2455 return off; 2456 } 2457 2458 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { 2459 switch (size_in_bytes) { 2460 #ifndef _LP64 2461 case 8: 2462 assert(dst2 != noreg, "second dest register required"); 2463 movl(dst, src); 2464 movl(dst2, src.plus_disp(BytesPerInt)); 2465 break; 2466 #else 2467 case 8: movq(dst, src); break; 2468 #endif 2469 case 4: movl(dst, src); break; 2470 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2471 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2472 default: ShouldNotReachHere(); 2473 } 2474 } 2475 2476 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { 2477 switch (size_in_bytes) { 2478 #ifndef _LP64 2479 case 8: 2480 assert(src2 != noreg, "second source register required"); 2481 movl(dst, src); 2482 movl(dst.plus_disp(BytesPerInt), src2); 2483 break; 2484 #else 2485 case 8: movq(dst, src); break; 2486 #endif 2487 case 4: movl(dst, src); break; 2488 case 2: movw(dst, src); break; 2489 case 1: movb(dst, src); break; 2490 default: ShouldNotReachHere(); 2491 } 2492 } 2493 2494 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) { 2495 assert(rscratch != noreg || always_reachable(dst), "missing"); 2496 2497 if (reachable(dst)) { 2498 movl(as_Address(dst), src); 2499 } else { 2500 lea(rscratch, dst); 2501 movl(Address(rscratch, 0), src); 2502 } 2503 } 2504 2505 void MacroAssembler::mov32(Register dst, AddressLiteral src) { 2506 if (reachable(src)) { 2507 movl(dst, as_Address(src)); 2508 } else { 2509 lea(dst, src); 2510 movl(dst, Address(dst, 0)); 2511 } 2512 } 2513 2514 // C++ bool manipulation 2515 2516 void MacroAssembler::movbool(Register dst, Address src) { 2517 if(sizeof(bool) == 1) 2518 movb(dst, src); 2519 else if(sizeof(bool) == 2) 2520 movw(dst, src); 2521 else if(sizeof(bool) == 4) 2522 movl(dst, src); 2523 else 2524 // unsupported 2525 ShouldNotReachHere(); 2526 } 2527 2528 void MacroAssembler::movbool(Address dst, bool boolconst) { 2529 if(sizeof(bool) == 1) 2530 movb(dst, (int) boolconst); 2531 else if(sizeof(bool) == 2) 2532 movw(dst, (int) boolconst); 2533 else if(sizeof(bool) == 4) 2534 movl(dst, (int) boolconst); 2535 else 2536 // unsupported 2537 ShouldNotReachHere(); 2538 } 2539 2540 void MacroAssembler::movbool(Address dst, Register src) { 2541 if(sizeof(bool) == 1) 2542 movb(dst, src); 2543 else if(sizeof(bool) == 2) 2544 movw(dst, src); 2545 else if(sizeof(bool) == 4) 2546 movl(dst, src); 2547 else 2548 // unsupported 2549 ShouldNotReachHere(); 2550 } 2551 2552 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) { 2553 assert(rscratch != noreg || 
always_reachable(src), "missing"); 2554 2555 if (reachable(src)) { 2556 movdl(dst, as_Address(src)); 2557 } else { 2558 lea(rscratch, src); 2559 movdl(dst, Address(rscratch, 0)); 2560 } 2561 } 2562 2563 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) { 2564 assert(rscratch != noreg || always_reachable(src), "missing"); 2565 2566 if (reachable(src)) { 2567 movq(dst, as_Address(src)); 2568 } else { 2569 lea(rscratch, src); 2570 movq(dst, Address(rscratch, 0)); 2571 } 2572 } 2573 2574 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) { 2575 assert(rscratch != noreg || always_reachable(src), "missing"); 2576 2577 if (reachable(src)) { 2578 if (UseXmmLoadAndClearUpper) { 2579 movsd (dst, as_Address(src)); 2580 } else { 2581 movlpd(dst, as_Address(src)); 2582 } 2583 } else { 2584 lea(rscratch, src); 2585 if (UseXmmLoadAndClearUpper) { 2586 movsd (dst, Address(rscratch, 0)); 2587 } else { 2588 movlpd(dst, Address(rscratch, 0)); 2589 } 2590 } 2591 } 2592 2593 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) { 2594 assert(rscratch != noreg || always_reachable(src), "missing"); 2595 2596 if (reachable(src)) { 2597 movss(dst, as_Address(src)); 2598 } else { 2599 lea(rscratch, src); 2600 movss(dst, Address(rscratch, 0)); 2601 } 2602 } 2603 2604 void MacroAssembler::movptr(Register dst, Register src) { 2605 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2606 } 2607 2608 void MacroAssembler::movptr(Register dst, Address src) { 2609 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2610 } 2611 2612 // src should NEVER be a real pointer. Use AddressLiteral for true pointers 2613 void MacroAssembler::movptr(Register dst, intptr_t src) { 2614 #ifdef _LP64 2615 if (is_uimm32(src)) { 2616 movl(dst, checked_cast<uint32_t>(src)); 2617 } else if (is_simm32(src)) { 2618 movq(dst, checked_cast<int32_t>(src)); 2619 } else { 2620 mov64(dst, src); 2621 } 2622 #else 2623 movl(dst, src); 2624 #endif 2625 } 2626 2627 void MacroAssembler::movptr(Address dst, Register src) { 2628 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2629 } 2630 2631 void MacroAssembler::movptr(Address dst, int32_t src) { 2632 LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); 2633 } 2634 2635 void MacroAssembler::movdqu(Address dst, XMMRegister src) { 2636 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2637 Assembler::movdqu(dst, src); 2638 } 2639 2640 void MacroAssembler::movdqu(XMMRegister dst, Address src) { 2641 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2642 Assembler::movdqu(dst, src); 2643 } 2644 2645 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) { 2646 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2647 Assembler::movdqu(dst, src); 2648 } 2649 2650 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2651 assert(rscratch != noreg || always_reachable(src), "missing"); 2652 2653 if (reachable(src)) { 2654 movdqu(dst, as_Address(src)); 2655 } else { 2656 lea(rscratch, src); 2657 movdqu(dst, Address(rscratch, 0)); 2658 } 2659 } 2660 2661 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) { 2662 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2663 Assembler::vmovdqu(dst, src); 2664 } 2665 2666 void MacroAssembler::vmovdqu(XMMRegister dst, 
Address src) { 2667 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2668 Assembler::vmovdqu(dst, src); 2669 } 2670 2671 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2672 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2673 Assembler::vmovdqu(dst, src); 2674 } 2675 2676 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2677 assert(rscratch != noreg || always_reachable(src), "missing"); 2678 2679 if (reachable(src)) { 2680 vmovdqu(dst, as_Address(src)); 2681 } 2682 else { 2683 lea(rscratch, src); 2684 vmovdqu(dst, Address(rscratch, 0)); 2685 } 2686 } 2687 2688 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2689 assert(rscratch != noreg || always_reachable(src), "missing"); 2690 2691 if (vector_len == AVX_512bit) { 2692 evmovdquq(dst, src, AVX_512bit, rscratch); 2693 } else if (vector_len == AVX_256bit) { 2694 vmovdqu(dst, src, rscratch); 2695 } else { 2696 movdqu(dst, src, rscratch); 2697 } 2698 } 2699 2700 void MacroAssembler::kmov(KRegister dst, Address src) { 2701 if (VM_Version::supports_avx512bw()) { 2702 kmovql(dst, src); 2703 } else { 2704 assert(VM_Version::supports_evex(), ""); 2705 kmovwl(dst, src); 2706 } 2707 } 2708 2709 void MacroAssembler::kmov(Address dst, KRegister src) { 2710 if (VM_Version::supports_avx512bw()) { 2711 kmovql(dst, src); 2712 } else { 2713 assert(VM_Version::supports_evex(), ""); 2714 kmovwl(dst, src); 2715 } 2716 } 2717 2718 void MacroAssembler::kmov(KRegister dst, KRegister src) { 2719 if (VM_Version::supports_avx512bw()) { 2720 kmovql(dst, src); 2721 } else { 2722 assert(VM_Version::supports_evex(), ""); 2723 kmovwl(dst, src); 2724 } 2725 } 2726 2727 void MacroAssembler::kmov(Register dst, KRegister src) { 2728 if (VM_Version::supports_avx512bw()) { 2729 kmovql(dst, src); 2730 } else { 2731 assert(VM_Version::supports_evex(), ""); 2732 kmovwl(dst, src); 2733 } 2734 } 2735 2736 void MacroAssembler::kmov(KRegister dst, Register src) { 2737 if (VM_Version::supports_avx512bw()) { 2738 kmovql(dst, src); 2739 } else { 2740 assert(VM_Version::supports_evex(), ""); 2741 kmovwl(dst, src); 2742 } 2743 } 2744 2745 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) { 2746 assert(rscratch != noreg || always_reachable(src), "missing"); 2747 2748 if (reachable(src)) { 2749 kmovql(dst, as_Address(src)); 2750 } else { 2751 lea(rscratch, src); 2752 kmovql(dst, Address(rscratch, 0)); 2753 } 2754 } 2755 2756 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) { 2757 assert(rscratch != noreg || always_reachable(src), "missing"); 2758 2759 if (reachable(src)) { 2760 kmovwl(dst, as_Address(src)); 2761 } else { 2762 lea(rscratch, src); 2763 kmovwl(dst, Address(rscratch, 0)); 2764 } 2765 } 2766 2767 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2768 int vector_len, Register rscratch) { 2769 assert(rscratch != noreg || always_reachable(src), "missing"); 2770 2771 if (reachable(src)) { 2772 Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len); 2773 } else { 2774 lea(rscratch, src); 2775 Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len); 2776 } 2777 } 2778 2779 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2780 int vector_len, Register rscratch) { 2781 
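  // (Note, a sketch of the EVEX masking semantics relied on here: lanes
  //  selected by 'mask' receive the loaded data; with merge == true the
  //  remaining lanes of dst keep their previous contents, with
  //  merge == false they are zeroed.)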
assert(rscratch != noreg || always_reachable(src), "missing"); 2782 2783 if (reachable(src)) { 2784 Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len); 2785 } else { 2786 lea(rscratch, src); 2787 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len); 2788 } 2789 } 2790 2791 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2792 assert(rscratch != noreg || always_reachable(src), "missing"); 2793 2794 if (reachable(src)) { 2795 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len); 2796 } else { 2797 lea(rscratch, src); 2798 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len); 2799 } 2800 } 2801 2802 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2803 assert(rscratch != noreg || always_reachable(src), "missing"); 2804 2805 if (reachable(src)) { 2806 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len); 2807 } else { 2808 lea(rscratch, src); 2809 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len); 2810 } 2811 } 2812 2813 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2814 assert(rscratch != noreg || always_reachable(src), "missing"); 2815 2816 if (reachable(src)) { 2817 Assembler::evmovdquq(dst, as_Address(src), vector_len); 2818 } else { 2819 lea(rscratch, src); 2820 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len); 2821 } 2822 } 2823 2824 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) { 2825 assert(rscratch != noreg || always_reachable(src), "missing"); 2826 2827 if (reachable(src)) { 2828 Assembler::movdqa(dst, as_Address(src)); 2829 } else { 2830 lea(rscratch, src); 2831 Assembler::movdqa(dst, Address(rscratch, 0)); 2832 } 2833 } 2834 2835 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2836 assert(rscratch != noreg || always_reachable(src), "missing"); 2837 2838 if (reachable(src)) { 2839 Assembler::movsd(dst, as_Address(src)); 2840 } else { 2841 lea(rscratch, src); 2842 Assembler::movsd(dst, Address(rscratch, 0)); 2843 } 2844 } 2845 2846 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2847 assert(rscratch != noreg || always_reachable(src), "missing"); 2848 2849 if (reachable(src)) { 2850 Assembler::movss(dst, as_Address(src)); 2851 } else { 2852 lea(rscratch, src); 2853 Assembler::movss(dst, Address(rscratch, 0)); 2854 } 2855 } 2856 2857 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) { 2858 assert(rscratch != noreg || always_reachable(src), "missing"); 2859 2860 if (reachable(src)) { 2861 Assembler::movddup(dst, as_Address(src)); 2862 } else { 2863 lea(rscratch, src); 2864 Assembler::movddup(dst, Address(rscratch, 0)); 2865 } 2866 } 2867 2868 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2869 assert(rscratch != noreg || always_reachable(src), "missing"); 2870 2871 if (reachable(src)) { 2872 Assembler::vmovddup(dst, as_Address(src), vector_len); 2873 } else { 2874 lea(rscratch, src); 2875 Assembler::vmovddup(dst, Address(rscratch, 0), vector_len); 2876 } 2877 } 2878 2879 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2880 assert(rscratch != noreg || always_reachable(src), "missing"); 2881 2882 if 
(reachable(src)) { 2883 Assembler::mulsd(dst, as_Address(src)); 2884 } else { 2885 lea(rscratch, src); 2886 Assembler::mulsd(dst, Address(rscratch, 0)); 2887 } 2888 } 2889 2890 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2891 assert(rscratch != noreg || always_reachable(src), "missing"); 2892 2893 if (reachable(src)) { 2894 Assembler::mulss(dst, as_Address(src)); 2895 } else { 2896 lea(rscratch, src); 2897 Assembler::mulss(dst, Address(rscratch, 0)); 2898 } 2899 } 2900 2901 void MacroAssembler::null_check(Register reg, int offset) { 2902 if (needs_explicit_null_check(offset)) { 2903 // provoke OS null exception if reg is null by 2904 // accessing M[reg] w/o changing any (non-CC) registers 2905 // NOTE: cmpl is plenty here to provoke a segv 2906 cmpptr(rax, Address(reg, 0)); 2907 // Note: should probably use testl(rax, Address(reg, 0)); 2908 // may be shorter code (however, this version of 2909 // testl needs to be implemented first) 2910 } else { 2911 // nothing to do, (later) access of M[reg + offset] 2912 // will provoke OS null exception if reg is null 2913 } 2914 } 2915 2916 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) { 2917 andptr(markword, markWord::inline_type_mask_in_place); 2918 cmpptr(markword, markWord::inline_type_pattern); 2919 jcc(Assembler::equal, is_inline_type); 2920 } 2921 2922 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) { 2923 movl(temp_reg, Address(klass, Klass::access_flags_offset())); 2924 testl(temp_reg, JVM_ACC_IDENTITY); 2925 jcc(Assembler::zero, is_inline_type); 2926 } 2927 2928 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) { 2929 testptr(object, object); 2930 jcc(Assembler::zero, not_inline_type); 2931 const int is_inline_type_mask = markWord::inline_type_pattern; 2932 movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes())); 2933 andptr(tmp, is_inline_type_mask); 2934 cmpptr(tmp, is_inline_type_mask); 2935 jcc(Assembler::notEqual, not_inline_type); 2936 } 2937 2938 void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) { 2939 #ifdef ASSERT 2940 { 2941 Label done_check; 2942 test_klass_is_inline_type(klass, temp_reg, done_check); 2943 stop("test_klass_is_empty_inline_type with non inline type klass"); 2944 bind(done_check); 2945 } 2946 #endif 2947 movl(temp_reg, Address(klass, InstanceKlass::misc_flags_offset())); 2948 testl(temp_reg, InstanceKlassFlags::is_empty_inline_type_value()); 2949 jcc(Assembler::notZero, is_empty_inline_type); 2950 } 2951 2952 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) { 2953 movl(temp_reg, flags); 2954 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift); 2955 jcc(Assembler::notEqual, is_null_free_inline_type); 2956 } 2957 2958 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) { 2959 movl(temp_reg, flags); 2960 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift); 2961 jcc(Assembler::equal, not_null_free_inline_type); 2962 } 2963 2964 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) { 2965 movl(temp_reg, flags); 2966 testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift); 2967 jcc(Assembler::notEqual, is_flat); 2968 } 2969 
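// (Illustrative sketch of how the ResolvedFieldEntry testers above and below
//  are meant to be used; 'flags' is assumed to already hold the entry's flag
//  bits and the labels are hypothetical, not taken from a real call site:
//
//    Label is_flat, done;
//    test_field_is_flat(flags, rscratch1, is_flat);
//    // ... access path for a non-flat (reference) field ...
//    jmp(done);
//    bind(is_flat);
//    // ... access path for a flat inline-type field ...
//    bind(done);
//  )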
2970 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
2971   movl(temp_reg, flags);
2972   testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift);
2973   jcc(Assembler::notEqual, has_null_marker);
2974 }
2975 
2976 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
2977   Label test_mark_word;
2978   // load mark word
2979   movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
2980   // if the mark word is unlocked it holds the prototype bits directly
2981   testl(temp_reg, markWord::unlocked_value);
2982   jccb(Assembler::notZero, test_mark_word);
2983   // slow path: the mark word may be displaced, use the klass prototype
2984   push(rscratch1);
2985   load_prototype_header(temp_reg, oop, rscratch1);
2986   pop(rscratch1);
2987 
2988   bind(test_mark_word);
2989   testl(temp_reg, test_bit);
2990   jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
2991 }
2992 
2993 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
2994                                          Label& is_flat_array) {
2995 #ifdef _LP64
2996   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
2997 #else
2998   load_klass(temp_reg, oop, noreg);
2999   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
3000   test_flat_array_layout(temp_reg, is_flat_array);
3001 #endif
3002 }
3003 
3004 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
3005                                              Label& is_non_flat_array) {
3006 #ifdef _LP64
3007   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
3008 #else
3009   load_klass(temp_reg, oop, noreg);
3010   movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
3011   test_non_flat_array_layout(temp_reg, is_non_flat_array);
3012 #endif
3013 }
3014 
3015 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
3016 #ifdef _LP64
3017   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
3018 #else
3019   Unimplemented();
3020 #endif
3021 }
3022 
3023 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) {
3024 #ifdef _LP64
3025   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
3026 #else
3027   Unimplemented();
3028 #endif
3029 }
3030 
3031 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
3032   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
3033   jcc(Assembler::notZero, is_flat_array);
3034 }
3035 
3036 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
3037   testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
3038   jcc(Assembler::zero, is_non_flat_array);
3039 }
3040 
3041 void MacroAssembler::os_breakpoint() {
3042   // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
3043   // (e.g., MSVC can't call ps() otherwise)
3044   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
3045 }
3046 
3047 void MacroAssembler::unimplemented(const char* what) {
3048   const char* buf = nullptr;
3049   {
3050     ResourceMark rm;
3051     stringStream ss;
3052     ss.print("unimplemented: %s", what);
3053     buf = code_string(ss.as_string());
3054   }
3055   stop(buf);
3056 }
3057 
3058 #ifdef _LP64
3059 #define XSTATE_BV 0x200
3060 #endif
3061 
3062 void MacroAssembler::pop_CPU_state() {
3063   pop_FPU_state();
3064   pop_IU_state();
3065 }
3066 
3067 void MacroAssembler::pop_FPU_state()
{ 3068 #ifndef _LP64 3069 frstor(Address(rsp, 0)); 3070 #else 3071 fxrstor(Address(rsp, 0)); 3072 #endif 3073 addptr(rsp, FPUStateSizeInWords * wordSize); 3074 } 3075 3076 void MacroAssembler::pop_IU_state() { 3077 popa(); 3078 LP64_ONLY(addq(rsp, 8)); 3079 popf(); 3080 } 3081 3082 // Save Integer and Float state 3083 // Warning: Stack must be 16 byte aligned (64bit) 3084 void MacroAssembler::push_CPU_state() { 3085 push_IU_state(); 3086 push_FPU_state(); 3087 } 3088 3089 void MacroAssembler::push_FPU_state() { 3090 subptr(rsp, FPUStateSizeInWords * wordSize); 3091 #ifndef _LP64 3092 fnsave(Address(rsp, 0)); 3093 fwait(); 3094 #else 3095 fxsave(Address(rsp, 0)); 3096 #endif // LP64 3097 } 3098 3099 void MacroAssembler::push_IU_state() { 3100 // Push flags first because pusha kills them 3101 pushf(); 3102 // Make sure rsp stays 16-byte aligned 3103 LP64_ONLY(subq(rsp, 8)); 3104 pusha(); 3105 } 3106 3107 void MacroAssembler::push_cont_fastpath() { 3108 if (!Continuations::enabled()) return; 3109 3110 #ifndef _LP64 3111 Register rthread = rax; 3112 Register rrealsp = rbx; 3113 push(rthread); 3114 push(rrealsp); 3115 3116 get_thread(rthread); 3117 3118 // The code below wants the original RSP. 3119 // Move it back after the pushes above. 3120 movptr(rrealsp, rsp); 3121 addptr(rrealsp, 2*wordSize); 3122 #else 3123 Register rthread = r15_thread; 3124 Register rrealsp = rsp; 3125 #endif 3126 3127 Label done; 3128 cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset())); 3129 jccb(Assembler::belowEqual, done); 3130 movptr(Address(rthread, JavaThread::cont_fastpath_offset()), rrealsp); 3131 bind(done); 3132 3133 #ifndef _LP64 3134 pop(rrealsp); 3135 pop(rthread); 3136 #endif 3137 } 3138 3139 void MacroAssembler::pop_cont_fastpath() { 3140 if (!Continuations::enabled()) return; 3141 3142 #ifndef _LP64 3143 Register rthread = rax; 3144 Register rrealsp = rbx; 3145 push(rthread); 3146 push(rrealsp); 3147 3148 get_thread(rthread); 3149 3150 // The code below wants the original RSP. 3151 // Move it back after the pushes above. 
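  // (i.e., a sketch of the arithmetic: rsp currently points below the two
  //  saved registers, so rsp + 2*wordSize reconstructs the SP the caller saw.)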
3152 movptr(rrealsp, rsp); 3153 addptr(rrealsp, 2*wordSize); 3154 #else 3155 Register rthread = r15_thread; 3156 Register rrealsp = rsp; 3157 #endif 3158 3159 Label done; 3160 cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset())); 3161 jccb(Assembler::below, done); 3162 movptr(Address(rthread, JavaThread::cont_fastpath_offset()), 0); 3163 bind(done); 3164 3165 #ifndef _LP64 3166 pop(rrealsp); 3167 pop(rthread); 3168 #endif 3169 } 3170 3171 void MacroAssembler::inc_held_monitor_count() { 3172 #ifndef _LP64 3173 Register thread = rax; 3174 push(thread); 3175 get_thread(thread); 3176 incrementl(Address(thread, JavaThread::held_monitor_count_offset())); 3177 pop(thread); 3178 #else // LP64 3179 incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 3180 #endif 3181 } 3182 3183 void MacroAssembler::dec_held_monitor_count() { 3184 #ifndef _LP64 3185 Register thread = rax; 3186 push(thread); 3187 get_thread(thread); 3188 decrementl(Address(thread, JavaThread::held_monitor_count_offset())); 3189 pop(thread); 3190 #else // LP64 3191 decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 3192 #endif 3193 } 3194 3195 #ifdef ASSERT 3196 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) { 3197 #ifdef _LP64 3198 Label no_cont; 3199 movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset())); 3200 testl(cont, cont); 3201 jcc(Assembler::zero, no_cont); 3202 stop(name); 3203 bind(no_cont); 3204 #else 3205 Unimplemented(); 3206 #endif 3207 } 3208 #endif 3209 3210 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) { // determine java_thread register 3211 if (!java_thread->is_valid()) { 3212 java_thread = rdi; 3213 get_thread(java_thread); 3214 } 3215 // we must set sp to zero to clear frame 3216 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD); 3217 // must clear fp, so that compiled frames are not confused; it is 3218 // possible that we need it only for debugging 3219 if (clear_fp) { 3220 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); 3221 } 3222 // Always clear the pc because it could have been set by make_walkable() 3223 movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); 3224 vzeroupper(); 3225 } 3226 3227 void MacroAssembler::restore_rax(Register tmp) { 3228 if (tmp == noreg) pop(rax); 3229 else if (tmp != rax) mov(rax, tmp); 3230 } 3231 3232 void MacroAssembler::round_to(Register reg, int modulus) { 3233 addptr(reg, modulus - 1); 3234 andptr(reg, -modulus); 3235 } 3236 3237 void MacroAssembler::save_rax(Register tmp) { 3238 if (tmp == noreg) push(rax); 3239 else if (tmp != rax) mov(tmp, rax); 3240 } 3241 3242 void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod) { 3243 if (at_return) { 3244 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore, 3245 // we may safely use rsp instead to perform the stack watermark check. 3246 cmpptr(in_nmethod ? rsp : rbp, Address(thread_reg, JavaThread::polling_word_offset())); 3247 jcc(Assembler::above, slow_path); 3248 return; 3249 } 3250 testb(Address(thread_reg, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit()); 3251 jcc(Assembler::notZero, slow_path); // handshake bit set implies poll 3252 } 3253 3254 // Calls to C land 3255 // 3256 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded 3257 // in the (thread-local) JavaThread object. 
When leaving C land, the last Java fp 3258 // has to be reset to 0. This is required to allow proper stack traversal. 3259 void MacroAssembler::set_last_Java_frame(Register java_thread, 3260 Register last_java_sp, 3261 Register last_java_fp, 3262 address last_java_pc, 3263 Register rscratch) { 3264 vzeroupper(); 3265 // determine java_thread register 3266 if (!java_thread->is_valid()) { 3267 java_thread = rdi; 3268 get_thread(java_thread); 3269 } 3270 // determine last_java_sp register 3271 if (!last_java_sp->is_valid()) { 3272 last_java_sp = rsp; 3273 } 3274 // last_java_fp is optional 3275 if (last_java_fp->is_valid()) { 3276 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp); 3277 } 3278 // last_java_pc is optional 3279 if (last_java_pc != nullptr) { 3280 Address java_pc(java_thread, 3281 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); 3282 lea(java_pc, InternalAddress(last_java_pc), rscratch); 3283 } 3284 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp); 3285 } 3286 3287 void MacroAssembler::shlptr(Register dst, int imm8) { 3288 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8)); 3289 } 3290 3291 void MacroAssembler::shrptr(Register dst, int imm8) { 3292 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8)); 3293 } 3294 3295 void MacroAssembler::sign_extend_byte(Register reg) { 3296 if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) { 3297 movsbl(reg, reg); // movsxb 3298 } else { 3299 shll(reg, 24); 3300 sarl(reg, 24); 3301 } 3302 } 3303 3304 void MacroAssembler::sign_extend_short(Register reg) { 3305 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 3306 movswl(reg, reg); // movsxw 3307 } else { 3308 shll(reg, 16); 3309 sarl(reg, 16); 3310 } 3311 } 3312 3313 void MacroAssembler::testl(Address dst, int32_t imm32) { 3314 if (imm32 >= 0 && is8bit(imm32)) { 3315 testb(dst, imm32); 3316 } else { 3317 Assembler::testl(dst, imm32); 3318 } 3319 } 3320 3321 void MacroAssembler::testl(Register dst, int32_t imm32) { 3322 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) { 3323 testb(dst, imm32); 3324 } else { 3325 Assembler::testl(dst, imm32); 3326 } 3327 } 3328 3329 void MacroAssembler::testl(Register dst, AddressLiteral src) { 3330 assert(always_reachable(src), "Address should be reachable"); 3331 testl(dst, as_Address(src)); 3332 } 3333 3334 #ifdef _LP64 3335 3336 void MacroAssembler::testq(Address dst, int32_t imm32) { 3337 if (imm32 >= 0) { 3338 testl(dst, imm32); 3339 } else { 3340 Assembler::testq(dst, imm32); 3341 } 3342 } 3343 3344 void MacroAssembler::testq(Register dst, int32_t imm32) { 3345 if (imm32 >= 0) { 3346 testl(dst, imm32); 3347 } else { 3348 Assembler::testq(dst, imm32); 3349 } 3350 } 3351 3352 #endif 3353 3354 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) { 3355 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3356 Assembler::pcmpeqb(dst, src); 3357 } 3358 3359 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) { 3360 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3361 Assembler::pcmpeqw(dst, src); 3362 } 3363 3364 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) { 3365 assert((dst->encoding() < 16),"XMM register should be 0-15"); 3366 Assembler::pcmpestri(dst, src, imm8); 3367 } 3368 3369 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int 
imm8) { 3370 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3371 Assembler::pcmpestri(dst, src, imm8); 3372 } 3373 3374 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) { 3375 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3376 Assembler::pmovzxbw(dst, src); 3377 } 3378 3379 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) { 3380 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3381 Assembler::pmovzxbw(dst, src); 3382 } 3383 3384 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) { 3385 assert((src->encoding() < 16),"XMM register should be 0-15"); 3386 Assembler::pmovmskb(dst, src); 3387 } 3388 3389 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) { 3390 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3391 Assembler::ptest(dst, src); 3392 } 3393 3394 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3395 assert(rscratch != noreg || always_reachable(src), "missing"); 3396 3397 if (reachable(src)) { 3398 Assembler::sqrtss(dst, as_Address(src)); 3399 } else { 3400 lea(rscratch, src); 3401 Assembler::sqrtss(dst, Address(rscratch, 0)); 3402 } 3403 } 3404 3405 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3406 assert(rscratch != noreg || always_reachable(src), "missing"); 3407 3408 if (reachable(src)) { 3409 Assembler::subsd(dst, as_Address(src)); 3410 } else { 3411 lea(rscratch, src); 3412 Assembler::subsd(dst, Address(rscratch, 0)); 3413 } 3414 } 3415 3416 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) { 3417 assert(rscratch != noreg || always_reachable(src), "missing"); 3418 3419 if (reachable(src)) { 3420 Assembler::roundsd(dst, as_Address(src), rmode); 3421 } else { 3422 lea(rscratch, src); 3423 Assembler::roundsd(dst, Address(rscratch, 0), rmode); 3424 } 3425 } 3426 3427 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3428 assert(rscratch != noreg || always_reachable(src), "missing"); 3429 3430 if (reachable(src)) { 3431 Assembler::subss(dst, as_Address(src)); 3432 } else { 3433 lea(rscratch, src); 3434 Assembler::subss(dst, Address(rscratch, 0)); 3435 } 3436 } 3437 3438 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3439 assert(rscratch != noreg || always_reachable(src), "missing"); 3440 3441 if (reachable(src)) { 3442 Assembler::ucomisd(dst, as_Address(src)); 3443 } else { 3444 lea(rscratch, src); 3445 Assembler::ucomisd(dst, Address(rscratch, 0)); 3446 } 3447 } 3448 3449 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3450 assert(rscratch != noreg || always_reachable(src), "missing"); 3451 3452 if (reachable(src)) { 3453 Assembler::ucomiss(dst, as_Address(src)); 3454 } else { 3455 lea(rscratch, src); 3456 Assembler::ucomiss(dst, Address(rscratch, 0)); 3457 } 3458 } 3459 3460 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3461 assert(rscratch != noreg || always_reachable(src), "missing"); 3462 3463 // Used in sign-bit flipping with aligned address. 
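  // (Typical use, as a sketch: xorpd(dst, some_sign_flip_literal, rscratch)
  //  with a 16-byte-aligned constant whose quadwords are 0x8000000000000000
  //  negates a double; legacy SSE xorpd faults on a misaligned memory
  //  operand, hence the alignment assert below.)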
3464 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 3465 if (reachable(src)) { 3466 Assembler::xorpd(dst, as_Address(src)); 3467 } else { 3468 lea(rscratch, src); 3469 Assembler::xorpd(dst, Address(rscratch, 0)); 3470 } 3471 } 3472 3473 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) { 3474 if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) { 3475 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 3476 } 3477 else { 3478 Assembler::xorpd(dst, src); 3479 } 3480 } 3481 3482 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) { 3483 if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) { 3484 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 3485 } else { 3486 Assembler::xorps(dst, src); 3487 } 3488 } 3489 3490 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) { 3491 assert(rscratch != noreg || always_reachable(src), "missing"); 3492 3493 // Used in sign-bit flipping with aligned address. 3494 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 3495 if (reachable(src)) { 3496 Assembler::xorps(dst, as_Address(src)); 3497 } else { 3498 lea(rscratch, src); 3499 Assembler::xorps(dst, Address(rscratch, 0)); 3500 } 3501 } 3502 3503 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) { 3504 assert(rscratch != noreg || always_reachable(src), "missing"); 3505 3506 // Used in sign-bit flipping with aligned address. 3507 bool aligned_adr = (((intptr_t)src.target() & 15) == 0); 3508 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes"); 3509 if (reachable(src)) { 3510 Assembler::pshufb(dst, as_Address(src)); 3511 } else { 3512 lea(rscratch, src); 3513 Assembler::pshufb(dst, Address(rscratch, 0)); 3514 } 3515 } 3516 3517 // AVX 3-operands instructions 3518 3519 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3520 assert(rscratch != noreg || always_reachable(src), "missing"); 3521 3522 if (reachable(src)) { 3523 vaddsd(dst, nds, as_Address(src)); 3524 } else { 3525 lea(rscratch, src); 3526 vaddsd(dst, nds, Address(rscratch, 0)); 3527 } 3528 } 3529 3530 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3531 assert(rscratch != noreg || always_reachable(src), "missing"); 3532 3533 if (reachable(src)) { 3534 vaddss(dst, nds, as_Address(src)); 3535 } else { 3536 lea(rscratch, src); 3537 vaddss(dst, nds, Address(rscratch, 0)); 3538 } 3539 } 3540 3541 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3542 assert(UseAVX > 0, "requires some form of AVX"); 3543 assert(rscratch != noreg || always_reachable(src), "missing"); 3544 3545 if (reachable(src)) { 3546 Assembler::vpaddb(dst, nds, as_Address(src), vector_len); 3547 } else { 3548 lea(rscratch, src); 3549 Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len); 3550 } 3551 } 3552 3553 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3554 assert(UseAVX > 0, "requires some form of AVX"); 3555 assert(rscratch != noreg || always_reachable(src), "missing"); 3556 3557 if (reachable(src)) { 3558 Assembler::vpaddd(dst, nds, as_Address(src), vector_len); 3559 } else { 3560 lea(rscratch, src); 3561 
Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len); 3562 } 3563 } 3564 3565 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 3566 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3567 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 3568 3569 vandps(dst, nds, negate_field, vector_len, rscratch); 3570 } 3571 3572 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 3573 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3574 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 3575 3576 vandpd(dst, nds, negate_field, vector_len, rscratch); 3577 } 3578 3579 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3580 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3581 Assembler::vpaddb(dst, nds, src, vector_len); 3582 } 3583 3584 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3585 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3586 Assembler::vpaddb(dst, nds, src, vector_len); 3587 } 3588 3589 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3590 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3591 Assembler::vpaddw(dst, nds, src, vector_len); 3592 } 3593 3594 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3595 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3596 Assembler::vpaddw(dst, nds, src, vector_len); 3597 } 3598 3599 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3600 assert(rscratch != noreg || always_reachable(src), "missing"); 3601 3602 if (reachable(src)) { 3603 Assembler::vpand(dst, nds, as_Address(src), vector_len); 3604 } else { 3605 lea(rscratch, src); 3606 Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len); 3607 } 3608 } 3609 3610 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3611 assert(rscratch != noreg || always_reachable(src), "missing"); 3612 3613 if (reachable(src)) { 3614 Assembler::vpbroadcastd(dst, as_Address(src), vector_len); 3615 } else { 3616 lea(rscratch, src); 3617 Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len); 3618 } 3619 } 3620 3621 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3622 assert(rscratch != noreg || always_reachable(src), "missing"); 3623 3624 if (reachable(src)) { 3625 Assembler::vpbroadcastq(dst, as_Address(src), vector_len); 3626 } else { 3627 lea(rscratch, src); 3628 Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len); 3629 } 3630 } 3631 3632 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3633 
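  // Same AddressLiteral idiom used throughout this file; as a sketch, with
  // `op` standing for the wrapped instruction:
  //   if (reachable(src))  op(dst, as_Address(src));        // RIP-relative
  //   else               { lea(rscratch, src);              // materialize address
  //                        op(dst, Address(rscratch, 0)); } // indirect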
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vbroadcastsd(dst, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vbroadcastss(dst, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len);
  }
}

// Vector float blend
// vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
  // WARN: Allow dst == (src1|src2), mask == scratch
  bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1;
  bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst;
  bool dst_available = dst != mask && (dst != src1 || dst != src2);
  if (blend_emulation && scratch_available && dst_available) {
    if (compute_mask) {
      vpsrad(scratch, mask, 32, vector_len);
      mask = scratch;
    }
    if (dst == src1) {
      vpandn(dst,     mask, src1, vector_len); // if mask == 0, src1
      vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
    } else {
      vpand (dst,     mask, src2, vector_len); // if mask == 1, src2
      vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
    }
    vpor(dst, dst, scratch, vector_len);
  } else {
    Assembler::vblendvps(dst, src1, src2, mask, vector_len);
  }
}

// vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
  // WARN: Allow dst == (src1|src2), mask == scratch
  bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1;
  // Unlike vblendvps above, computing the mask here clobbers scratch first
  // (the vpxor below zeroes it before the mask is read), so scratch must
  // also be distinct from mask in that case.
  bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask);
  bool dst_available = dst != mask && (dst != src1 || dst != src2);
  if (blend_emulation && scratch_available && dst_available) {
    if (compute_mask) {
      vpxor(scratch, scratch, scratch, vector_len);
      vpcmpgtq(scratch, scratch, mask, vector_len);
      mask = scratch;
    }
    if (dst == src1) {
      vpandn(dst,     mask, src1, vector_len); // if mask == 0, src1
      vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
    } else {
      vpand (dst,     mask, src2, vector_len); // if mask == 1, src2
      vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
    }
    vpor(dst, dst, scratch, vector_len);
  } else {
    Assembler::vblendvpd(dst, src1, src2, mask, vector_len);
  }
}

void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
  Assembler::vpcmpeqb(dst,
nds, src, vector_len); 3707 } 3708 3709 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3710 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3711 Assembler::vpcmpeqw(dst, nds, src, vector_len); 3712 } 3713 3714 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3715 assert(rscratch != noreg || always_reachable(src), "missing"); 3716 3717 if (reachable(src)) { 3718 Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len); 3719 } else { 3720 lea(rscratch, src); 3721 Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len); 3722 } 3723 } 3724 3725 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3726 int comparison, bool is_signed, int vector_len, Register rscratch) { 3727 assert(rscratch != noreg || always_reachable(src), "missing"); 3728 3729 if (reachable(src)) { 3730 Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3731 } else { 3732 lea(rscratch, src); 3733 Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3734 } 3735 } 3736 3737 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3738 int comparison, bool is_signed, int vector_len, Register rscratch) { 3739 assert(rscratch != noreg || always_reachable(src), "missing"); 3740 3741 if (reachable(src)) { 3742 Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3743 } else { 3744 lea(rscratch, src); 3745 Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3746 } 3747 } 3748 3749 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3750 int comparison, bool is_signed, int vector_len, Register rscratch) { 3751 assert(rscratch != noreg || always_reachable(src), "missing"); 3752 3753 if (reachable(src)) { 3754 Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3755 } else { 3756 lea(rscratch, src); 3757 Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3758 } 3759 } 3760 3761 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3762 int comparison, bool is_signed, int vector_len, Register rscratch) { 3763 assert(rscratch != noreg || always_reachable(src), "missing"); 3764 3765 if (reachable(src)) { 3766 Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3767 } else { 3768 lea(rscratch, src); 3769 Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3770 } 3771 } 3772 3773 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) { 3774 if (width == Assembler::Q) { 3775 Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len); 3776 } else { 3777 Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len); 3778 } 3779 } 3780 3781 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) { 3782 int eq_cond_enc = 0x29; 3783 int gt_cond_enc = 0x37; 3784 if (width != Assembler::Q) { 3785 eq_cond_enc = 0x74 + width; 3786 
gt_cond_enc = 0x64 + width; 3787 } 3788 switch (cond) { 3789 case eq: 3790 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3791 break; 3792 case neq: 3793 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3794 vallones(xtmp, vector_len); 3795 vpxor(dst, xtmp, dst, vector_len); 3796 break; 3797 case le: 3798 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3799 vallones(xtmp, vector_len); 3800 vpxor(dst, xtmp, dst, vector_len); 3801 break; 3802 case nlt: 3803 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3804 vallones(xtmp, vector_len); 3805 vpxor(dst, xtmp, dst, vector_len); 3806 break; 3807 case lt: 3808 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3809 break; 3810 case nle: 3811 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3812 break; 3813 default: 3814 assert(false, "Should not reach here"); 3815 } 3816 } 3817 3818 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) { 3819 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3820 Assembler::vpmovzxbw(dst, src, vector_len); 3821 } 3822 3823 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) { 3824 assert((src->encoding() < 16),"XMM register should be 0-15"); 3825 Assembler::vpmovmskb(dst, src, vector_len); 3826 } 3827 3828 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3829 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3830 Assembler::vpmullw(dst, nds, src, vector_len); 3831 } 3832 3833 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3834 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3835 Assembler::vpmullw(dst, nds, src, vector_len); 3836 } 3837 3838 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3839 assert((UseAVX > 0), "AVX support is needed"); 3840 assert(rscratch != noreg || always_reachable(src), "missing"); 3841 3842 if (reachable(src)) { 3843 Assembler::vpmulld(dst, nds, as_Address(src), vector_len); 3844 } else { 3845 lea(rscratch, src); 3846 Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len); 3847 } 3848 } 3849 3850 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3851 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3852 Assembler::vpsubb(dst, nds, src, vector_len); 3853 } 3854 3855 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3856 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3857 Assembler::vpsubb(dst, nds, src, vector_len); 3858 } 3859 3860 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3861 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3862 Assembler::vpsubw(dst, nds, src, vector_len); 3863 } 3864 3865 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3866 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM 
register should be 0-15"); 3867 Assembler::vpsubw(dst, nds, src, vector_len); 3868 } 3869 3870 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3871 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3872 Assembler::vpsraw(dst, nds, shift, vector_len); 3873 } 3874 3875 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3876 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3877 Assembler::vpsraw(dst, nds, shift, vector_len); 3878 } 3879 3880 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3881 assert(UseAVX > 2,""); 3882 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3883 vector_len = 2; 3884 } 3885 Assembler::evpsraq(dst, nds, shift, vector_len); 3886 } 3887 3888 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3889 assert(UseAVX > 2,""); 3890 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3891 vector_len = 2; 3892 } 3893 Assembler::evpsraq(dst, nds, shift, vector_len); 3894 } 3895 3896 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3897 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3898 Assembler::vpsrlw(dst, nds, shift, vector_len); 3899 } 3900 3901 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3902 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3903 Assembler::vpsrlw(dst, nds, shift, vector_len); 3904 } 3905 3906 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3907 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3908 Assembler::vpsllw(dst, nds, shift, vector_len); 3909 } 3910 3911 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3912 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3913 Assembler::vpsllw(dst, nds, shift, vector_len); 3914 } 3915 3916 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) { 3917 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3918 Assembler::vptest(dst, src); 3919 } 3920 3921 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) { 3922 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3923 Assembler::punpcklbw(dst, src); 3924 } 3925 3926 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) { 3927 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 3928 Assembler::pshufd(dst, src, mode); 3929 } 3930 3931 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { 3932 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3933 Assembler::pshuflw(dst, src, mode); 3934 } 3935 3936 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int 
vector_len, Register rscratch) { 3937 assert(rscratch != noreg || always_reachable(src), "missing"); 3938 3939 if (reachable(src)) { 3940 vandpd(dst, nds, as_Address(src), vector_len); 3941 } else { 3942 lea(rscratch, src); 3943 vandpd(dst, nds, Address(rscratch, 0), vector_len); 3944 } 3945 } 3946 3947 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3948 assert(rscratch != noreg || always_reachable(src), "missing"); 3949 3950 if (reachable(src)) { 3951 vandps(dst, nds, as_Address(src), vector_len); 3952 } else { 3953 lea(rscratch, src); 3954 vandps(dst, nds, Address(rscratch, 0), vector_len); 3955 } 3956 } 3957 3958 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, 3959 bool merge, int vector_len, Register rscratch) { 3960 assert(rscratch != noreg || always_reachable(src), "missing"); 3961 3962 if (reachable(src)) { 3963 Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len); 3964 } else { 3965 lea(rscratch, src); 3966 Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 3967 } 3968 } 3969 3970 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3971 assert(rscratch != noreg || always_reachable(src), "missing"); 3972 3973 if (reachable(src)) { 3974 vdivsd(dst, nds, as_Address(src)); 3975 } else { 3976 lea(rscratch, src); 3977 vdivsd(dst, nds, Address(rscratch, 0)); 3978 } 3979 } 3980 3981 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3982 assert(rscratch != noreg || always_reachable(src), "missing"); 3983 3984 if (reachable(src)) { 3985 vdivss(dst, nds, as_Address(src)); 3986 } else { 3987 lea(rscratch, src); 3988 vdivss(dst, nds, Address(rscratch, 0)); 3989 } 3990 } 3991 3992 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3993 assert(rscratch != noreg || always_reachable(src), "missing"); 3994 3995 if (reachable(src)) { 3996 vmulsd(dst, nds, as_Address(src)); 3997 } else { 3998 lea(rscratch, src); 3999 vmulsd(dst, nds, Address(rscratch, 0)); 4000 } 4001 } 4002 4003 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4004 assert(rscratch != noreg || always_reachable(src), "missing"); 4005 4006 if (reachable(src)) { 4007 vmulss(dst, nds, as_Address(src)); 4008 } else { 4009 lea(rscratch, src); 4010 vmulss(dst, nds, Address(rscratch, 0)); 4011 } 4012 } 4013 4014 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4015 assert(rscratch != noreg || always_reachable(src), "missing"); 4016 4017 if (reachable(src)) { 4018 vsubsd(dst, nds, as_Address(src)); 4019 } else { 4020 lea(rscratch, src); 4021 vsubsd(dst, nds, Address(rscratch, 0)); 4022 } 4023 } 4024 4025 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4026 assert(rscratch != noreg || always_reachable(src), "missing"); 4027 4028 if (reachable(src)) { 4029 vsubss(dst, nds, as_Address(src)); 4030 } else { 4031 lea(rscratch, src); 4032 vsubss(dst, nds, Address(rscratch, 0)); 4033 } 4034 } 4035 4036 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4037 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 4038 assert(rscratch != noreg || 
always_reachable(src), "missing"); 4039 4040 vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch); 4041 } 4042 4043 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4044 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 4045 assert(rscratch != noreg || always_reachable(src), "missing"); 4046 4047 vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch); 4048 } 4049 4050 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 4051 assert(rscratch != noreg || always_reachable(src), "missing"); 4052 4053 if (reachable(src)) { 4054 vxorpd(dst, nds, as_Address(src), vector_len); 4055 } else { 4056 lea(rscratch, src); 4057 vxorpd(dst, nds, Address(rscratch, 0), vector_len); 4058 } 4059 } 4060 4061 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 4062 assert(rscratch != noreg || always_reachable(src), "missing"); 4063 4064 if (reachable(src)) { 4065 vxorps(dst, nds, as_Address(src), vector_len); 4066 } else { 4067 lea(rscratch, src); 4068 vxorps(dst, nds, Address(rscratch, 0), vector_len); 4069 } 4070 } 4071 4072 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 4073 assert(rscratch != noreg || always_reachable(src), "missing"); 4074 4075 if (UseAVX > 1 || (vector_len < 1)) { 4076 if (reachable(src)) { 4077 Assembler::vpxor(dst, nds, as_Address(src), vector_len); 4078 } else { 4079 lea(rscratch, src); 4080 Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len); 4081 } 4082 } else { 4083 MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch); 4084 } 4085 } 4086 4087 void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 4088 assert(rscratch != noreg || always_reachable(src), "missing"); 4089 4090 if (reachable(src)) { 4091 Assembler::vpermd(dst, nds, as_Address(src), vector_len); 4092 } else { 4093 lea(rscratch, src); 4094 Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len); 4095 } 4096 } 4097 4098 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) { 4099 const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask); 4100 STATIC_ASSERT(inverted_mask == -4); // otherwise check this code 4101 // The inverted mask is sign-extended 4102 andptr(possibly_non_local, inverted_mask); 4103 } 4104 4105 void MacroAssembler::resolve_jobject(Register value, 4106 Register thread, 4107 Register tmp) { 4108 assert_different_registers(value, thread, tmp); 4109 Label done, tagged, weak_tagged; 4110 testptr(value, value); 4111 jcc(Assembler::zero, done); // Use null as-is. 4112 testptr(value, JNIHandles::tag_mask); // Test for tag. 4113 jcc(Assembler::notZero, tagged); 4114 4115 // Resolve local handle 4116 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp, thread); 4117 verify_oop(value); 4118 jmp(done); 4119 4120 bind(tagged); 4121 testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag. 4122 jcc(Assembler::notZero, weak_tagged); 4123 4124 // Resolve global handle 4125 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread); 4126 verify_oop(value); 4127 jmp(done); 4128 4129 bind(weak_tagged); 4130 // Resolve jweak. 
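  // Sketch of the decoding performed by this function (the tag values below
  // are illustrative assumptions; the authoritative constants live in
  // JNIHandles::TypeTag):
  //   value == nullptr                    -> keep null (handled above)
  //   (value & tag_mask) == 0             -> local:  load *value
  //   (value & TypeTag::weak_global) == 0 -> global: load *(value - TypeTag::global)
  //   otherwise                           -> weak:   load *(value - TypeTag::weak_global)
  //                                          through a phantom-ref barrier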
4131 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 4132 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp, thread); 4133 verify_oop(value); 4134 4135 bind(done); 4136 } 4137 4138 void MacroAssembler::resolve_global_jobject(Register value, 4139 Register thread, 4140 Register tmp) { 4141 assert_different_registers(value, thread, tmp); 4142 Label done; 4143 4144 testptr(value, value); 4145 jcc(Assembler::zero, done); // Use null as-is. 4146 4147 #ifdef ASSERT 4148 { 4149 Label valid_global_tag; 4150 testptr(value, JNIHandles::TypeTag::global); // Test for global tag. 4151 jcc(Assembler::notZero, valid_global_tag); 4152 stop("non global jobject using resolve_global_jobject"); 4153 bind(valid_global_tag); 4154 } 4155 #endif 4156 4157 // Resolve global handle 4158 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread); 4159 verify_oop(value); 4160 4161 bind(done); 4162 } 4163 4164 void MacroAssembler::subptr(Register dst, int32_t imm32) { 4165 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32)); 4166 } 4167 4168 // Force generation of a 4 byte immediate value even if it fits into 8bit 4169 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) { 4170 LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32)); 4171 } 4172 4173 void MacroAssembler::subptr(Register dst, Register src) { 4174 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); 4175 } 4176 4177 // C++ bool manipulation 4178 void MacroAssembler::testbool(Register dst) { 4179 if(sizeof(bool) == 1) 4180 testb(dst, 0xff); 4181 else if(sizeof(bool) == 2) { 4182 // testw implementation needed for two byte bools 4183 ShouldNotReachHere(); 4184 } else if(sizeof(bool) == 4) 4185 testl(dst, dst); 4186 else 4187 // unsupported 4188 ShouldNotReachHere(); 4189 } 4190 4191 void MacroAssembler::testptr(Register dst, Register src) { 4192 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src)); 4193 } 4194 4195 // Object / value buffer allocation... 4196 // 4197 // Kills klass and rsi on LP64 4198 void MacroAssembler::allocate_instance(Register klass, Register new_obj, 4199 Register t1, Register t2, 4200 bool clear_fields, Label& alloc_failed) 4201 { 4202 Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop; 4203 Register layout_size = t1; 4204 assert(new_obj == rax, "needs to be rax"); 4205 assert_different_registers(klass, new_obj, t1, t2); 4206 4207 // get instance_size in InstanceKlass (scaled to a count of bytes) 4208 movl(layout_size, Address(klass, Klass::layout_helper_offset())); 4209 // test to see if it has a finalizer or is malformed in some way 4210 testl(layout_size, Klass::_lh_instance_slow_path_bit); 4211 jcc(Assembler::notZero, slow_case_no_pop); 4212 4213 // Allocate the instance: 4214 // If TLAB is enabled: 4215 // Try to allocate in the TLAB. 4216 // If fails, go to the slow path. 4217 // Else If inline contiguous allocations are enabled: 4218 // Try to allocate in eden. 4219 // If fails due to heap end, go to slow path. 4220 // 4221 // If TLAB is enabled OR inline contiguous is enabled: 4222 // Initialize the allocation. 4223 // Exit. 4224 // 4225 // Go to slow path. 
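  // Note on the layout helper tested above: for a well-formed instance klass
  // it is simply the instance size in bytes (a multiple of 8, as the debug
  // check further down asserts), and _lh_instance_slow_path_bit is the low
  // bit, so e.g. a finalizable klass carries an odd value and is routed to
  // the slow path.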
  push(klass);
  const Register thread = LP64_ONLY(r15_thread) NOT_LP64(klass);
#ifndef _LP64
  if (UseTLAB) {
    get_thread(thread);
  }
#endif // _LP64

  if (UseTLAB) {
    tlab_allocate(thread, new_obj, layout_size, 0, klass, t2, slow_case);
    if (ZeroTLAB || (!clear_fields)) {
      // the fields have already been cleared
      jmp(initialize_header);
    } else {
      // initialize both the header and fields
      jmp(initialize_object);
    }
  } else {
    jmp(slow_case);
  }

  // If UseTLAB is true, the object was created above and still needs to be
  // initialized. Otherwise, skip and go to the slow path.
  if (UseTLAB) {
    if (clear_fields) {
      // The object is initialized before the header. If the object size is
      // zero, go directly to the header initialization.
      bind(initialize_object);
      decrement(layout_size, sizeof(oopDesc));
      jcc(Assembler::zero, initialize_header);

      // Initialize topmost object field, divide size by 8, check if odd and
      // test if zero.
      Register zero = klass;
      xorl(zero, zero); // use zero reg to clear memory (shorter code)
      shrl(layout_size, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd

#ifdef ASSERT
      // make sure instance_size was multiple of 8
      Label L;
      // Ignore partial flag stall after shrl() since it is debug VM
      jcc(Assembler::carryClear, L);
      stop("object size is not multiple of 2 - adjust this code");
      bind(L);
      // must be > 0, no extra check needed here
#endif

      // initialize remaining object fields: instance_size was a multiple of 8
      {
        Label loop;
        bind(loop);
        movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 1*oopSize), zero);
        NOT_LP64(movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 2*oopSize), zero));
        decrement(layout_size);
        jcc(Assembler::notZero, loop);
      }
    } // clear_fields

    // initialize object header only.
    bind(initialize_header);
    pop(klass);
    Register mark_word = t2;
    movptr(mark_word, Address(klass, Klass::prototype_header_offset()));
    movptr(Address(new_obj, oopDesc::mark_offset_in_bytes()), mark_word);
#ifdef _LP64
    xorl(rsi, rsi);                // use zero reg to clear memory (shorter code)
    store_klass_gap(new_obj, rsi); // zero klass gap for compressed oops
#endif
    movptr(t2, klass);                   // preserve klass
    store_klass(new_obj, t2, rscratch1); // src klass reg is potentially compressed

    jmp(done);
  }

  bind(slow_case);
  pop(klass);
  bind(slow_case_no_pop);
  jmp(alloc_failed);

  bind(done);
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
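// Typical call site (see allocate_instance above), passing the size in a
// register and no constant size:
//   tlab_allocate(thread, new_obj, layout_size, 0, klass, t2, slow_case);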
void MacroAssembler::tlab_allocate(Register thread, Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
}

RegSet MacroAssembler::call_clobbered_gp_registers() {
  RegSet regs;
#ifdef _LP64
  regs += RegSet::of(rax, rcx, rdx);
#ifndef _WINDOWS
  regs += RegSet::of(rsi, rdi);
#endif
  regs += RegSet::range(r8, r11);
#else
  regs += RegSet::of(rax, rcx, rdx);
#endif
  return regs;
}

XMMRegSet MacroAssembler::call_clobbered_xmm_registers() {
  int num_xmm_registers = XMMRegister::available_xmm_registers();
#if defined(_WINDOWS) && defined(_LP64)
  XMMRegSet result = XMMRegSet::range(xmm0, xmm5);
  if (num_xmm_registers > 16) {
    result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1));
  }
  return result;
#else
  return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1));
#endif
}

static int FPUSaveAreaSize = align_up(108, StackAlignmentInBytes); // 108 bytes needed for FPU state by fsave/frstor

#ifndef _LP64
static bool use_x87_registers() { return UseSSE < 2; }
#endif
static bool use_xmm_registers() { return UseSSE >= 1; }

// C1 only ever uses the first double/float of the XMM register.
static int xmm_save_size() { return UseSSE >= 2 ? sizeof(double) : sizeof(float); }

static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
  if (UseSSE == 1) {
    masm->movflt(Address(rsp, offset), reg);
  } else {
    masm->movdbl(Address(rsp, offset), reg);
  }
}

static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
  if (UseSSE == 1) {
    masm->movflt(reg, Address(rsp, offset));
  } else {
    masm->movdbl(reg, Address(rsp, offset));
  }
}

static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers,
                                  bool save_fpu, int& gp_area_size,
                                  int& fp_area_size, int& xmm_area_size) {

  gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
                          StackAlignmentInBytes);
#ifdef _LP64
  fp_area_size = 0;
#else
  fp_area_size = (save_fpu && use_x87_registers()) ? FPUSaveAreaSize : 0;
#endif
  xmm_area_size = (save_fpu && use_xmm_registers()) ?
xmm_registers.size() * xmm_save_size() : 0; 4385 4386 return gp_area_size + fp_area_size + xmm_area_size; 4387 } 4388 4389 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) { 4390 block_comment("push_call_clobbered_registers start"); 4391 // Regular registers 4392 RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude; 4393 4394 int gp_area_size; 4395 int fp_area_size; 4396 int xmm_area_size; 4397 int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu, 4398 gp_area_size, fp_area_size, xmm_area_size); 4399 subptr(rsp, total_save_size); 4400 4401 push_set(gp_registers_to_push, 0); 4402 4403 #ifndef _LP64 4404 if (save_fpu && use_x87_registers()) { 4405 fnsave(Address(rsp, gp_area_size)); 4406 fwait(); 4407 } 4408 #endif 4409 if (save_fpu && use_xmm_registers()) { 4410 push_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size); 4411 } 4412 4413 block_comment("push_call_clobbered_registers end"); 4414 } 4415 4416 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) { 4417 block_comment("pop_call_clobbered_registers start"); 4418 4419 RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude; 4420 4421 int gp_area_size; 4422 int fp_area_size; 4423 int xmm_area_size; 4424 int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu, 4425 gp_area_size, fp_area_size, xmm_area_size); 4426 4427 if (restore_fpu && use_xmm_registers()) { 4428 pop_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size); 4429 } 4430 #ifndef _LP64 4431 if (restore_fpu && use_x87_registers()) { 4432 frstor(Address(rsp, gp_area_size)); 4433 } 4434 #endif 4435 4436 pop_set(gp_registers_to_pop, 0); 4437 4438 addptr(rsp, total_save_size); 4439 4440 vzeroupper(); 4441 4442 block_comment("pop_call_clobbered_registers end"); 4443 } 4444 4445 void MacroAssembler::push_set(XMMRegSet set, int offset) { 4446 assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be"); 4447 int spill_offset = offset; 4448 4449 for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) { 4450 save_xmm_register(this, spill_offset, *it); 4451 spill_offset += xmm_save_size(); 4452 } 4453 } 4454 4455 void MacroAssembler::pop_set(XMMRegSet set, int offset) { 4456 int restore_size = set.size() * xmm_save_size(); 4457 assert(is_aligned(restore_size, StackAlignmentInBytes), "must be"); 4458 4459 int restore_offset = offset + restore_size - xmm_save_size(); 4460 4461 for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) { 4462 restore_xmm_register(this, restore_offset, *it); 4463 restore_offset -= xmm_save_size(); 4464 } 4465 } 4466 4467 void MacroAssembler::push_set(RegSet set, int offset) { 4468 int spill_offset; 4469 if (offset == -1) { 4470 int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size; 4471 int aligned_size = align_up(register_push_size, StackAlignmentInBytes); 4472 subptr(rsp, aligned_size); 4473 spill_offset = 0; 4474 } else { 4475 spill_offset = offset; 4476 } 4477 4478 for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) { 4479 movptr(Address(rsp, spill_offset), *it); 4480 spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size; 4481 } 4482 } 4483 4484 void MacroAssembler::pop_set(RegSet set, int offset) { 4485 4486 int gp_reg_size = Register::max_slots_per_register * 
VMRegImpl::stack_slot_size; 4487 int restore_size = set.size() * gp_reg_size; 4488 int aligned_size = align_up(restore_size, StackAlignmentInBytes); 4489 4490 int restore_offset; 4491 if (offset == -1) { 4492 restore_offset = restore_size - gp_reg_size; 4493 } else { 4494 restore_offset = offset + restore_size - gp_reg_size; 4495 } 4496 for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) { 4497 movptr(*it, Address(rsp, restore_offset)); 4498 restore_offset -= gp_reg_size; 4499 } 4500 4501 if (offset == -1) { 4502 addptr(rsp, aligned_size); 4503 } 4504 } 4505 4506 // Preserves the contents of address, destroys the contents length_in_bytes and temp. 4507 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) { 4508 assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different"); 4509 assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord"); 4510 Label done; 4511 4512 testptr(length_in_bytes, length_in_bytes); 4513 jcc(Assembler::zero, done); 4514 4515 // initialize topmost word, divide index by 2, check if odd and test if zero 4516 // note: for the remaining code to work, index must be a multiple of BytesPerWord 4517 #ifdef ASSERT 4518 { 4519 Label L; 4520 testptr(length_in_bytes, BytesPerWord - 1); 4521 jcc(Assembler::zero, L); 4522 stop("length must be a multiple of BytesPerWord"); 4523 bind(L); 4524 } 4525 #endif 4526 Register index = length_in_bytes; 4527 xorptr(temp, temp); // use _zero reg to clear memory (shorter code) 4528 if (UseIncDec) { 4529 shrptr(index, 3); // divide by 8/16 and set carry flag if bit 2 was set 4530 } else { 4531 shrptr(index, 2); // use 2 instructions to avoid partial flag stall 4532 shrptr(index, 1); 4533 } 4534 #ifndef _LP64 4535 // index could have not been a multiple of 8 (i.e., bit 2 was set) 4536 { 4537 Label even; 4538 // note: if index was a multiple of 8, then it cannot 4539 // be 0 now otherwise it must have been 0 before 4540 // => if it is even, we don't need to check for 0 again 4541 jcc(Assembler::carryClear, even); 4542 // clear topmost word (no jump would be needed if conditional assignment worked here) 4543 movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp); 4544 // index could be 0 now, must check again 4545 jcc(Assembler::zero, done); 4546 bind(even); 4547 } 4548 #endif // !_LP64 4549 // initialize remaining object fields: index is a multiple of 2 now 4550 { 4551 Label loop; 4552 bind(loop); 4553 movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp); 4554 NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);) 4555 decrement(index); 4556 jcc(Assembler::notZero, loop); 4557 } 4558 4559 bind(done); 4560 } 4561 4562 void MacroAssembler::get_inline_type_field_klass(Register klass, Register index, Register inline_klass) { 4563 movptr(inline_klass, Address(klass, InstanceKlass::inline_type_field_klasses_offset())); 4564 #ifdef ASSERT 4565 { 4566 Label done; 4567 cmpptr(inline_klass, 0); 4568 jcc(Assembler::notEqual, done); 4569 stop("get_inline_type_field_klass contains no inline klass"); 4570 bind(done); 4571 } 4572 #endif 4573 movptr(inline_klass, Address(inline_klass, index, Address::times_ptr, Array<InlineKlass*>::base_offset_in_bytes())); 4574 } 4575 4576 void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) { 4577 
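  // Sketch of the loads emitted below (pseudo-code, not real VM accessor
  // calls): the default value's byte offset is read from the klass' fixed
  // block, and the value itself from the klass' java mirror at that offset:
  //   offset = inline_klass->fixed_block->default_value_offset
  //   obj    = inline_klass->java_mirror()->field_at(offset)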
#ifdef ASSERT 4578 { 4579 Label done_check; 4580 test_klass_is_inline_type(inline_klass, temp_reg, done_check); 4581 stop("get_default_value_oop from non inline type klass"); 4582 bind(done_check); 4583 } 4584 #endif 4585 Register offset = temp_reg; 4586 // Getting the offset of the pre-allocated default value 4587 movptr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset()))); 4588 movl(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset()))); 4589 4590 // Getting the mirror 4591 movptr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset()))); 4592 resolve_oop_handle(obj, inline_klass); 4593 4594 // Getting the pre-allocated default value from the mirror 4595 Address field(obj, offset, Address::times_1); 4596 load_heap_oop(obj, field); 4597 } 4598 4599 void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) { 4600 #ifdef ASSERT 4601 { 4602 Label done_check; 4603 test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check); 4604 stop("get_empty_value from non-empty inline klass"); 4605 bind(done_check); 4606 } 4607 #endif 4608 get_default_value_oop(inline_klass, temp_reg, obj); 4609 } 4610 4611 4612 // Look up the method for a megamorphic invokeinterface call. 4613 // The target method is determined by <intf_klass, itable_index>. 4614 // The receiver klass is in recv_klass. 4615 // On success, the result will be in method_result, and execution falls through. 4616 // On failure, execution transfers to the given label. 4617 void MacroAssembler::lookup_interface_method(Register recv_klass, 4618 Register intf_klass, 4619 RegisterOrConstant itable_index, 4620 Register method_result, 4621 Register scan_temp, 4622 Label& L_no_such_interface, 4623 bool return_method) { 4624 assert_different_registers(recv_klass, intf_klass, scan_temp); 4625 assert_different_registers(method_result, intf_klass, scan_temp); 4626 assert(recv_klass != method_result || !return_method, 4627 "recv_klass can be destroyed when method isn't needed"); 4628 4629 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 4630 "caller must use same register for non-constant itable index as for method"); 4631 4632 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 4633 int vtable_base = in_bytes(Klass::vtable_start_offset()); 4634 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 4635 int scan_step = itableOffsetEntry::size() * wordSize; 4636 int vte_size = vtableEntry::size_in_bytes(); 4637 Address::ScaleFactor times_vte_scale = Address::times_ptr; 4638 assert(vte_size == wordSize, "else adjust times_vte_scale"); 4639 4640 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 4641 4642 // %%% Could store the aligned, prescaled offset in the klassoop. 4643 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base)); 4644 4645 if (return_method) { 4646 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 
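    // For example, with a constant itable_index of 3 on LP64 the lea below
    // leaves
    //   recv_klass + 3 * wordSize + itentry_off
    // in recv_klass, so the final movptr at found_method only needs the
    // interface's offset (found during the scan) as a byte displacement.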
4647 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 4648 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off)); 4649 } 4650 4651 // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) { 4652 // if (scan->interface() == intf) { 4653 // result = (klass + scan->offset() + itable_index); 4654 // } 4655 // } 4656 Label search, found_method; 4657 4658 for (int peel = 1; peel >= 0; peel--) { 4659 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset())); 4660 cmpptr(intf_klass, method_result); 4661 4662 if (peel) { 4663 jccb(Assembler::equal, found_method); 4664 } else { 4665 jccb(Assembler::notEqual, search); 4666 // (invert the test to fall through to found_method...) 4667 } 4668 4669 if (!peel) break; 4670 4671 bind(search); 4672 4673 // Check that the previous entry is non-null. A null entry means that 4674 // the receiver class doesn't implement the interface, and wasn't the 4675 // same as when the caller was compiled. 4676 testptr(method_result, method_result); 4677 jcc(Assembler::zero, L_no_such_interface); 4678 addptr(scan_temp, scan_step); 4679 } 4680 4681 bind(found_method); 4682 4683 if (return_method) { 4684 // Got a hit. 4685 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset())); 4686 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1)); 4687 } 4688 } 4689 4690 // Look up the method for a megamorphic invokeinterface call in a single pass over itable: 4691 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData 4692 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index 4693 // The target method is determined by <holder_klass, itable_index>. 4694 // The receiver klass is in recv_klass. 4695 // On success, the result will be in method_result, and execution falls through. 4696 // On failure, execution transfers to the given label. 4697 void MacroAssembler::lookup_interface_method_stub(Register recv_klass, 4698 Register holder_klass, 4699 Register resolved_klass, 4700 Register method_result, 4701 Register scan_temp, 4702 Register temp_reg2, 4703 Register receiver, 4704 int itable_index, 4705 Label& L_no_such_interface) { 4706 assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver); 4707 Register temp_itbl_klass = method_result; 4708 Register temp_reg = (temp_reg2 == noreg ? 
recv_klass : temp_reg2); // reuse recv_klass register on 32-bit x86 impl 4709 4710 int vtable_base = in_bytes(Klass::vtable_start_offset()); 4711 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 4712 int scan_step = itableOffsetEntry::size() * wordSize; 4713 int vte_size = vtableEntry::size_in_bytes(); 4714 int ioffset = in_bytes(itableOffsetEntry::interface_offset()); 4715 int ooffset = in_bytes(itableOffsetEntry::offset_offset()); 4716 Address::ScaleFactor times_vte_scale = Address::times_ptr; 4717 assert(vte_size == wordSize, "adjust times_vte_scale"); 4718 4719 Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found; 4720 4721 // temp_itbl_klass = recv_klass.itable[0] 4722 // scan_temp = &recv_klass.itable[0] + step 4723 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 4724 movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset)); 4725 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step)); 4726 xorptr(temp_reg, temp_reg); 4727 4728 // Initial checks: 4729 // - if (holder_klass != resolved_klass), go to "scan for resolved" 4730 // - if (itable[0] == 0), no such interface 4731 // - if (itable[0] == holder_klass), shortcut to "holder found" 4732 cmpptr(holder_klass, resolved_klass); 4733 jccb(Assembler::notEqual, L_loop_scan_resolved_entry); 4734 testptr(temp_itbl_klass, temp_itbl_klass); 4735 jccb(Assembler::zero, L_no_such_interface); 4736 cmpptr(holder_klass, temp_itbl_klass); 4737 jccb(Assembler::equal, L_holder_found); 4738 4739 // Loop: Look for holder_klass record in itable 4740 // do { 4741 // tmp = itable[index]; 4742 // index += step; 4743 // if (tmp == holder_klass) { 4744 // goto L_holder_found; // Found! 4745 // } 4746 // } while (tmp != 0); 4747 // goto L_no_such_interface // Not found. 4748 Label L_scan_holder; 4749 bind(L_scan_holder); 4750 movptr(temp_itbl_klass, Address(scan_temp, 0)); 4751 addptr(scan_temp, scan_step); 4752 cmpptr(holder_klass, temp_itbl_klass); 4753 jccb(Assembler::equal, L_holder_found); 4754 testptr(temp_itbl_klass, temp_itbl_klass); 4755 jccb(Assembler::notZero, L_scan_holder); 4756 4757 jmpb(L_no_such_interface); 4758 4759 // Loop: Look for resolved_class record in itable 4760 // do { 4761 // tmp = itable[index]; 4762 // index += step; 4763 // if (tmp == holder_klass) { 4764 // // Also check if we have met a holder klass 4765 // holder_tmp = itable[index-step-ioffset]; 4766 // } 4767 // if (tmp == resolved_klass) { 4768 // goto L_resolved_found; // Found! 4769 // } 4770 // } while (tmp != 0); 4771 // goto L_no_such_interface // Not found. 4772 // 4773 Label L_loop_scan_resolved; 4774 bind(L_loop_scan_resolved); 4775 movptr(temp_itbl_klass, Address(scan_temp, 0)); 4776 addptr(scan_temp, scan_step); 4777 bind(L_loop_scan_resolved_entry); 4778 cmpptr(holder_klass, temp_itbl_klass); 4779 cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 4780 cmpptr(resolved_klass, temp_itbl_klass); 4781 jccb(Assembler::equal, L_resolved_found); 4782 testptr(temp_itbl_klass, temp_itbl_klass); 4783 jccb(Assembler::notZero, L_loop_scan_resolved); 4784 4785 jmpb(L_no_such_interface); 4786 4787 Label L_ready; 4788 4789 // See if we already have a holder klass. If not, go and scan for it. 
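  // (temp_reg was zeroed before the scans and is written only by the cmovl
  //  in the resolved-scan loop, so at this point it holds either 0, meaning
  //  the holder entry has not been seen yet, or the holder's itable offset.)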
4790 bind(L_resolved_found); 4791 testptr(temp_reg, temp_reg); 4792 jccb(Assembler::zero, L_scan_holder); 4793 jmpb(L_ready); 4794 4795 bind(L_holder_found); 4796 movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 4797 4798 // Finally, temp_reg contains holder_klass vtable offset 4799 bind(L_ready); 4800 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 4801 if (temp_reg2 == noreg) { // recv_klass register is clobbered for 32-bit x86 impl 4802 load_klass(scan_temp, receiver, noreg); 4803 movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 4804 } else { 4805 movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 4806 } 4807 } 4808 4809 4810 // virtual method calling 4811 void MacroAssembler::lookup_virtual_method(Register recv_klass, 4812 RegisterOrConstant vtable_index, 4813 Register method_result) { 4814 const ByteSize base = Klass::vtable_start_offset(); 4815 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below"); 4816 Address vtable_entry_addr(recv_klass, 4817 vtable_index, Address::times_ptr, 4818 base + vtableEntry::method_offset()); 4819 movptr(method_result, vtable_entry_addr); 4820 } 4821 4822 4823 void MacroAssembler::check_klass_subtype(Register sub_klass, 4824 Register super_klass, 4825 Register temp_reg, 4826 Label& L_success) { 4827 Label L_failure; 4828 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr); 4829 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr); 4830 bind(L_failure); 4831 } 4832 4833 4834 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 4835 Register super_klass, 4836 Register temp_reg, 4837 Label* L_success, 4838 Label* L_failure, 4839 Label* L_slow_path, 4840 RegisterOrConstant super_check_offset) { 4841 assert_different_registers(sub_klass, super_klass, temp_reg); 4842 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 4843 if (super_check_offset.is_register()) { 4844 assert_different_registers(sub_klass, super_klass, 4845 super_check_offset.as_register()); 4846 } else if (must_load_sco) { 4847 assert(temp_reg != noreg, "supply either a temp or a register offset"); 4848 } 4849 4850 Label L_fallthrough; 4851 int label_nulls = 0; 4852 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4853 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4854 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; } 4855 assert(label_nulls <= 1, "at most one null in the batch"); 4856 4857 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 4858 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 4859 Address super_check_offset_addr(super_klass, sco_offset); 4860 4861 // Hacked jcc, which "knows" that L_fallthrough, at least, is in 4862 // range of a jccb. If this routine grows larger, reconsider at 4863 // least some of these. 4864 #define local_jcc(assembler_cond, label) \ 4865 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \ 4866 else jcc( assembler_cond, label) /*omit semi*/ 4867 4868 // Hacked jmp, which may only be used just before L_fallthrough. 
4869 #define final_jmp(label) \ 4870 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 4871 else jmp(label) /*omit semi*/ 4872 4873 // If the pointers are equal, we are done (e.g., String[] elements). 4874 // This self-check enables sharing of secondary supertype arrays among 4875 // non-primary types such as array-of-interface. Otherwise, each such 4876 // type would need its own customized SSA. 4877 // We move this check to the front of the fast path because many 4878 // type checks are in fact trivially successful in this manner, 4879 // so we get a nicely predicted branch right at the start of the check. 4880 cmpptr(sub_klass, super_klass); 4881 local_jcc(Assembler::equal, *L_success); 4882 4883 // Check the supertype display: 4884 if (must_load_sco) { 4885 // Positive movl does right thing on LP64. 4886 movl(temp_reg, super_check_offset_addr); 4887 super_check_offset = RegisterOrConstant(temp_reg); 4888 } 4889 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0); 4890 cmpptr(super_klass, super_check_addr); // load displayed supertype 4891 4892 // This check has worked decisively for primary supers. 4893 // Secondary supers are sought in the super_cache ('super_cache_addr'). 4894 // (Secondary supers are interfaces and very deeply nested subtypes.) 4895 // This works in the same check above because of a tricky aliasing 4896 // between the super_cache and the primary super display elements. 4897 // (The 'super_check_addr' can address either, as the case requires.) 4898 // Note that the cache is updated below if it does not help us find 4899 // what we need immediately. 4900 // So if it was a primary super, we can just fail immediately. 4901 // Otherwise, it's the slow path for us (no success at this point). 4902 4903 if (super_check_offset.is_register()) { 4904 local_jcc(Assembler::equal, *L_success); 4905 cmpl(super_check_offset.as_register(), sc_offset); 4906 if (L_failure == &L_fallthrough) { 4907 local_jcc(Assembler::equal, *L_slow_path); 4908 } else { 4909 local_jcc(Assembler::notEqual, *L_failure); 4910 final_jmp(*L_slow_path); 4911 } 4912 } else if (super_check_offset.as_constant() == sc_offset) { 4913 // Need a slow path; fast failure is impossible. 4914 if (L_slow_path == &L_fallthrough) { 4915 local_jcc(Assembler::equal, *L_success); 4916 } else { 4917 local_jcc(Assembler::notEqual, *L_slow_path); 4918 final_jmp(*L_success); 4919 } 4920 } else { 4921 // No slow path; it's a fast decision. 
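    // (With a constant offset different from sc_offset, the cmpptr above read
    //  a primary-super display slot rather than the secondary-super cache, so
    //  equal means subtype and not-equal means failure; no cache probe is
    //  needed in either direction.)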
4922 if (L_failure == &L_fallthrough) { 4923 local_jcc(Assembler::equal, *L_success); 4924 } else { 4925 local_jcc(Assembler::notEqual, *L_failure); 4926 final_jmp(*L_success); 4927 } 4928 } 4929 4930 bind(L_fallthrough); 4931 4932 #undef local_jcc 4933 #undef final_jmp 4934 } 4935 4936 4937 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 4938 Register super_klass, 4939 Register temp_reg, 4940 Register temp2_reg, 4941 Label* L_success, 4942 Label* L_failure, 4943 bool set_cond_codes) { 4944 assert_different_registers(sub_klass, super_klass, temp_reg); 4945 if (temp2_reg != noreg) 4946 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg); 4947 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 4948 4949 Label L_fallthrough; 4950 int label_nulls = 0; 4951 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4952 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4953 assert(label_nulls <= 1, "at most one null in the batch"); 4954 4955 // a couple of useful fields in sub_klass: 4956 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 4957 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 4958 Address secondary_supers_addr(sub_klass, ss_offset); 4959 Address super_cache_addr( sub_klass, sc_offset); 4960 4961 // Do a linear scan of the secondary super-klass chain. 4962 // This code is rarely used, so simplicity is a virtue here. 4963 // The repne_scan instruction uses fixed registers, which we must spill. 4964 // Don't worry too much about pre-existing connections with the input regs. 4965 4966 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super) 4967 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter) 4968 4969 // Get super_klass value into rax (even if it was in rdi or rcx). 4970 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false; 4971 if (super_klass != rax) { 4972 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; } 4973 mov(rax, super_klass); 4974 } 4975 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; } 4976 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; } 4977 4978 #ifndef PRODUCT 4979 uint* pst_counter = &SharedRuntime::_partial_subtype_ctr; 4980 ExternalAddress pst_counter_addr((address) pst_counter); 4981 NOT_LP64( incrementl(pst_counter_addr) ); 4982 LP64_ONLY( lea(rcx, pst_counter_addr) ); 4983 LP64_ONLY( incrementl(Address(rcx, 0)) ); 4984 #endif //PRODUCT 4985 4986 // We will consult the secondary-super array. 4987 movptr(rdi, secondary_supers_addr); 4988 // Load the array length. (Positive movl does right thing on LP64.) 4989 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes())); 4990 // Skip to start of data. 4991 addptr(rdi, Array<Klass*>::base_offset_in_bytes()); 4992 4993 // Scan RCX words at [RDI] for an occurrence of RAX. 4994 // Set NZ/Z based on last compare. 4995 // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does 4996 // not change flags (only scas instruction which is repeated sets flags). 4997 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found. 4998 4999 testptr(rax,rax); // Set Z = 0 5000 repne_scan(); 5001 5002 // Unspill the temp. registers: 5003 if (pushed_rdi) pop(rdi); 5004 if (pushed_rcx) pop(rcx); 5005 if (pushed_rax) pop(rax); 5006 5007 if (set_cond_codes) { 5008 // Special hack for the AD files: rdi is guaranteed non-zero. 
5009 assert(!pushed_rdi, "rdi must be left non-null"); 5010 // Also, the condition codes are properly set Z/NZ on succeed/failure. 5011 } 5012 5013 if (L_failure == &L_fallthrough) 5014 jccb(Assembler::notEqual, *L_failure); 5015 else jcc(Assembler::notEqual, *L_failure); 5016 5017 // Success. Cache the super we found and proceed in triumph. 5018 movptr(super_cache_addr, super_klass); 5019 5020 if (L_success != &L_fallthrough) { 5021 jmp(*L_success); 5022 } 5023 5024 #undef IS_A_TEMP 5025 5026 bind(L_fallthrough); 5027 } 5028 5029 void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) { 5030 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 5031 5032 Label L_fallthrough; 5033 if (L_fast_path == nullptr) { 5034 L_fast_path = &L_fallthrough; 5035 } else if (L_slow_path == nullptr) { 5036 L_slow_path = &L_fallthrough; 5037 } 5038 5039 // Fast path check: class is fully initialized 5040 cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); 5041 jcc(Assembler::equal, *L_fast_path); 5042 5043 // Fast path check: current thread is initializer thread 5044 cmpptr(thread, Address(klass, InstanceKlass::init_thread_offset())); 5045 if (L_slow_path == &L_fallthrough) { 5046 jcc(Assembler::equal, *L_fast_path); 5047 bind(*L_slow_path); 5048 } else if (L_fast_path == &L_fallthrough) { 5049 jcc(Assembler::notEqual, *L_slow_path); 5050 bind(*L_fast_path); 5051 } else { 5052 Unimplemented(); 5053 } 5054 } 5055 5056 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) { 5057 if (VM_Version::supports_cmov()) { 5058 cmovl(cc, dst, src); 5059 } else { 5060 Label L; 5061 jccb(negate_condition(cc), L); 5062 movl(dst, src); 5063 bind(L); 5064 } 5065 } 5066 5067 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) { 5068 if (VM_Version::supports_cmov()) { 5069 cmovl(cc, dst, src); 5070 } else { 5071 Label L; 5072 jccb(negate_condition(cc), L); 5073 movl(dst, src); 5074 bind(L); 5075 } 5076 } 5077 5078 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 5079 if (!VerifyOops || VerifyAdapterSharing) { 5080 // Below address of the code string confuses VerifyAdapterSharing 5081 // because it may differ between otherwise equivalent adapters. 
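    // (The code below pushes the address of a freshly allocated code string
    //  as an argument; that embedded immediate would differ between otherwise
    //  identical adapters and defeat the sharing comparison.)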
    return;
  }

  BLOCK_COMMENT("verify_oop {");
#ifdef _LP64
  push(rscratch1);
#endif
  push(rax); // save rax
  push(reg); // pass register argument

  // Pass register number to verify_oop_subroutine
  const char* b = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
    b = code_string(ss.as_string());
  }
  ExternalAddress buffer((address) b);
  pushptr(buffer.addr(), rscratch1);

  // call indirectly to solve generation ordering problem
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
  // Caller pops the arguments (oop, message) and restores rax, r10
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::vallones(XMMRegister dst, int vector_len) {
  if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) {
    // Only pcmpeq has dependency-breaking treatment (i.e. the execution can begin
    // without waiting for the previous result on dst), not vpcmpeqd, so just use
    // vpternlog.
    vpternlogd(dst, 0xFF, dst, dst, vector_len);
  } else if (VM_Version::supports_avx()) {
    vpcmpeqd(dst, dst, dst, vector_len);
  } else {
    assert(VM_Version::supports_sse2(), "");
    pcmpeqd(dst, dst);
  }
}

Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
  int stackElementSize = Interpreter::stackElementSize;
  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
#ifdef ASSERT
  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  Register scale_reg = noreg;
  Address::ScaleFactor scale_factor = Address::no_scale;
  if (arg_slot.is_constant()) {
    offset += arg_slot.as_constant() * stackElementSize;
  } else {
    scale_reg = arg_slot.as_register();
    scale_factor = Address::times(stackElementSize);
  }
  offset += wordSize; // return PC is on stack
  return Address(rsp, scale_reg, scale_factor, offset);
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
  if (!VerifyOops || VerifyAdapterSharing) {
    // Below address of the code string confuses VerifyAdapterSharing
    // because it may differ between otherwise equivalent adapters.
    return;
  }

#ifdef _LP64
  push(rscratch1);
#endif
  push(rax); // save rax
  // addr may contain rsp so we will have to adjust it based on the push
  // we just did (and on 64 bit we do two pushes).
  // NOTE: the 64-bit code once had a bug here: it did movq(addr, rax), which
  // stores rax into addr -- the reverse of what was intended.
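  // A sketch of the adjustment below: 'addr' was formed against rsp before the
  // save(s) above, and rsp has since moved down by two words on 64-bit
  // (rscratch1 and rax) or one word on 32-bit (rax only), so an rsp-based
  // address must be re-biased by LP64_ONLY(2 *) BytesPerWord before use.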
5159 if (addr.uses(rsp)) { 5160 lea(rax, addr); 5161 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord)); 5162 } else { 5163 pushptr(addr); 5164 } 5165 5166 // Pass register number to verify_oop_subroutine 5167 const char* b = nullptr; 5168 { 5169 ResourceMark rm; 5170 stringStream ss; 5171 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 5172 b = code_string(ss.as_string()); 5173 } 5174 ExternalAddress buffer((address) b); 5175 pushptr(buffer.addr(), rscratch1); 5176 5177 // call indirectly to solve generation ordering problem 5178 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 5179 call(rax); 5180 // Caller pops the arguments (addr, message) and restores rax, r10. 5181 } 5182 5183 void MacroAssembler::verify_tlab() { 5184 #ifdef ASSERT 5185 if (UseTLAB && VerifyOops) { 5186 Label next, ok; 5187 Register t1 = rsi; 5188 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread); 5189 5190 push(t1); 5191 NOT_LP64(push(thread_reg)); 5192 NOT_LP64(get_thread(thread_reg)); 5193 5194 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 5195 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); 5196 jcc(Assembler::aboveEqual, next); 5197 STOP("assert(top >= start)"); 5198 should_not_reach_here(); 5199 5200 bind(next); 5201 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); 5202 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 5203 jcc(Assembler::aboveEqual, ok); 5204 STOP("assert(top <= end)"); 5205 should_not_reach_here(); 5206 5207 bind(ok); 5208 NOT_LP64(pop(thread_reg)); 5209 pop(t1); 5210 } 5211 #endif 5212 } 5213 5214 class ControlWord { 5215 public: 5216 int32_t _value; 5217 5218 int rounding_control() const { return (_value >> 10) & 3 ; } 5219 int precision_control() const { return (_value >> 8) & 3 ; } 5220 bool precision() const { return ((_value >> 5) & 1) != 0; } 5221 bool underflow() const { return ((_value >> 4) & 1) != 0; } 5222 bool overflow() const { return ((_value >> 3) & 1) != 0; } 5223 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 5224 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 5225 bool invalid() const { return ((_value >> 0) & 1) != 0; } 5226 5227 void print() const { 5228 // rounding control 5229 const char* rc; 5230 switch (rounding_control()) { 5231 case 0: rc = "round near"; break; 5232 case 1: rc = "round down"; break; 5233 case 2: rc = "round up "; break; 5234 case 3: rc = "chop "; break; 5235 default: 5236 rc = nullptr; // silence compiler warnings 5237 fatal("Unknown rounding control: %d", rounding_control()); 5238 }; 5239 // precision control 5240 const char* pc; 5241 switch (precision_control()) { 5242 case 0: pc = "24 bits "; break; 5243 case 1: pc = "reserved"; break; 5244 case 2: pc = "53 bits "; break; 5245 case 3: pc = "64 bits "; break; 5246 default: 5247 pc = nullptr; // silence compiler warnings 5248 fatal("Unknown precision control: %d", precision_control()); 5249 }; 5250 // flags 5251 char f[9]; 5252 f[0] = ' '; 5253 f[1] = ' '; 5254 f[2] = (precision ()) ? 'P' : 'p'; 5255 f[3] = (underflow ()) ? 'U' : 'u'; 5256 f[4] = (overflow ()) ? 'O' : 'o'; 5257 f[5] = (zero_divide ()) ? 'Z' : 'z'; 5258 f[6] = (denormalized()) ? 'D' : 'd'; 5259 f[7] = (invalid ()) ? 
'I' : 'i'; 5260 f[8] = '\x0'; 5261 // output 5262 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc); 5263 } 5264 5265 }; 5266 5267 class StatusWord { 5268 public: 5269 int32_t _value; 5270 5271 bool busy() const { return ((_value >> 15) & 1) != 0; } 5272 bool C3() const { return ((_value >> 14) & 1) != 0; } 5273 bool C2() const { return ((_value >> 10) & 1) != 0; } 5274 bool C1() const { return ((_value >> 9) & 1) != 0; } 5275 bool C0() const { return ((_value >> 8) & 1) != 0; } 5276 int top() const { return (_value >> 11) & 7 ; } 5277 bool error_status() const { return ((_value >> 7) & 1) != 0; } 5278 bool stack_fault() const { return ((_value >> 6) & 1) != 0; } 5279 bool precision() const { return ((_value >> 5) & 1) != 0; } 5280 bool underflow() const { return ((_value >> 4) & 1) != 0; } 5281 bool overflow() const { return ((_value >> 3) & 1) != 0; } 5282 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 5283 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 5284 bool invalid() const { return ((_value >> 0) & 1) != 0; } 5285 5286 void print() const { 5287 // condition codes 5288 char c[5]; 5289 c[0] = (C3()) ? '3' : '-'; 5290 c[1] = (C2()) ? '2' : '-'; 5291 c[2] = (C1()) ? '1' : '-'; 5292 c[3] = (C0()) ? '0' : '-'; 5293 c[4] = '\x0'; 5294 // flags 5295 char f[9]; 5296 f[0] = (error_status()) ? 'E' : '-'; 5297 f[1] = (stack_fault ()) ? 'S' : '-'; 5298 f[2] = (precision ()) ? 'P' : '-'; 5299 f[3] = (underflow ()) ? 'U' : '-'; 5300 f[4] = (overflow ()) ? 'O' : '-'; 5301 f[5] = (zero_divide ()) ? 'Z' : '-'; 5302 f[6] = (denormalized()) ? 'D' : '-'; 5303 f[7] = (invalid ()) ? 'I' : '-'; 5304 f[8] = '\x0'; 5305 // output 5306 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top()); 5307 } 5308 5309 }; 5310 5311 class TagWord { 5312 public: 5313 int32_t _value; 5314 5315 int tag_at(int i) const { return (_value >> (i*2)) & 3; } 5316 5317 void print() const { 5318 printf("%04x", _value & 0xFFFF); 5319 } 5320 5321 }; 5322 5323 class FPU_Register { 5324 public: 5325 int32_t _m0; 5326 int32_t _m1; 5327 int16_t _ex; 5328 5329 bool is_indefinite() const { 5330 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0; 5331 } 5332 5333 void print() const { 5334 char sign = (_ex < 0) ? '-' : '+'; 5335 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " "; 5336 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind); 5337 }; 5338 5339 }; 5340 5341 class FPU_State { 5342 public: 5343 enum { 5344 register_size = 10, 5345 number_of_registers = 8, 5346 register_mask = 7 5347 }; 5348 5349 ControlWord _control_word; 5350 StatusWord _status_word; 5351 TagWord _tag_word; 5352 int32_t _error_offset; 5353 int32_t _error_selector; 5354 int32_t _data_offset; 5355 int32_t _data_selector; 5356 int8_t _register[register_size * number_of_registers]; 5357 5358 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); } 5359 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; } 5360 5361 const char* tag_as_string(int tag) const { 5362 switch (tag) { 5363 case 0: return "valid"; 5364 case 1: return "zero"; 5365 case 2: return "special"; 5366 case 3: return "empty"; 5367 } 5368 ShouldNotReachHere(); 5369 return nullptr; 5370 } 5371 5372 void print() const { 5373 // print computation registers 5374 { int t = _status_word.top(); 5375 for (int i = 0; i < number_of_registers; i++) { 5376 int j = (i - t) & register_mask; 5377 printf("%c r%d = ST%d = ", (j == 0 ? 
'*' : ' '), i, j); 5378 st(j)->print(); 5379 printf(" %s\n", tag_as_string(_tag_word.tag_at(i))); 5380 } 5381 } 5382 printf("\n"); 5383 // print control registers 5384 printf("ctrl = "); _control_word.print(); printf("\n"); 5385 printf("stat = "); _status_word .print(); printf("\n"); 5386 printf("tags = "); _tag_word .print(); printf("\n"); 5387 } 5388 5389 }; 5390 5391 class Flag_Register { 5392 public: 5393 int32_t _value; 5394 5395 bool overflow() const { return ((_value >> 11) & 1) != 0; } 5396 bool direction() const { return ((_value >> 10) & 1) != 0; } 5397 bool sign() const { return ((_value >> 7) & 1) != 0; } 5398 bool zero() const { return ((_value >> 6) & 1) != 0; } 5399 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; } 5400 bool parity() const { return ((_value >> 2) & 1) != 0; } 5401 bool carry() const { return ((_value >> 0) & 1) != 0; } 5402 5403 void print() const { 5404 // flags 5405 char f[8]; 5406 f[0] = (overflow ()) ? 'O' : '-'; 5407 f[1] = (direction ()) ? 'D' : '-'; 5408 f[2] = (sign ()) ? 'S' : '-'; 5409 f[3] = (zero ()) ? 'Z' : '-'; 5410 f[4] = (auxiliary_carry()) ? 'A' : '-'; 5411 f[5] = (parity ()) ? 'P' : '-'; 5412 f[6] = (carry ()) ? 'C' : '-'; 5413 f[7] = '\x0'; 5414 // output 5415 printf("%08x flags = %s", _value, f); 5416 } 5417 5418 }; 5419 5420 class IU_Register { 5421 public: 5422 int32_t _value; 5423 5424 void print() const { 5425 printf("%08x %11d", _value, _value); 5426 } 5427 5428 }; 5429 5430 class IU_State { 5431 public: 5432 Flag_Register _eflags; 5433 IU_Register _rdi; 5434 IU_Register _rsi; 5435 IU_Register _rbp; 5436 IU_Register _rsp; 5437 IU_Register _rbx; 5438 IU_Register _rdx; 5439 IU_Register _rcx; 5440 IU_Register _rax; 5441 5442 void print() const { 5443 // computation registers 5444 printf("rax, = "); _rax.print(); printf("\n"); 5445 printf("rbx, = "); _rbx.print(); printf("\n"); 5446 printf("rcx = "); _rcx.print(); printf("\n"); 5447 printf("rdx = "); _rdx.print(); printf("\n"); 5448 printf("rdi = "); _rdi.print(); printf("\n"); 5449 printf("rsi = "); _rsi.print(); printf("\n"); 5450 printf("rbp, = "); _rbp.print(); printf("\n"); 5451 printf("rsp = "); _rsp.print(); printf("\n"); 5452 printf("\n"); 5453 // control registers 5454 printf("flgs = "); _eflags.print(); printf("\n"); 5455 } 5456 }; 5457 5458 5459 class CPU_State { 5460 public: 5461 FPU_State _fpu_state; 5462 IU_State _iu_state; 5463 5464 void print() const { 5465 printf("--------------------------------------------------\n"); 5466 _iu_state .print(); 5467 printf("\n"); 5468 _fpu_state.print(); 5469 printf("--------------------------------------------------\n"); 5470 } 5471 5472 }; 5473 5474 5475 static void _print_CPU_state(CPU_State* state) { 5476 state->print(); 5477 }; 5478 5479 5480 void MacroAssembler::print_CPU_state() { 5481 push_CPU_state(); 5482 push(rsp); // pass CPU state 5483 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state))); 5484 addptr(rsp, wordSize); // discard argument 5485 pop_CPU_state(); 5486 } 5487 5488 5489 #ifndef _LP64 5490 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) { 5491 static int counter = 0; 5492 FPU_State* fs = &state->_fpu_state; 5493 counter++; 5494 // For leaf calls, only verify that the top few elements remain empty. 5495 // We only need 1 empty at the top for C2 code. 
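  // The 'stack_depth' convention, for reference: a non-negative value means
  // exactly that many elements must be on the x87 stack, while a negative
  // value is used for leaf calls, where only emptiness near the top of the
  // stack is verified (tag value 3 == empty).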
  if( stack_depth < 0 ) {
    if( fs->tag_for_st(7) != 3 ) {
      printf("FPR7 not empty\n");
      state->print();
      assert(false, "error");
      return false;
    }
    return true; // All other stack states do not matter
  }

  assert((fs->_control_word._value & 0xffff) == StubRoutines::x86::fpu_cntrl_wrd_std(),
         "bad FPU control word");

  // compute stack depth
  int i = 0;
  while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
  int d = i;
  while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
  // verify findings
  if (i != FPU_State::number_of_registers) {
    // stack not contiguous
    printf("%s: stack not contiguous at ST%d\n", s, i);
    state->print();
    assert(false, "error");
    return false;
  }
  // check if computed stack depth corresponds to expected stack depth
  if (stack_depth < 0) {
    // expected stack depth is -stack_depth or less
    if (d > -stack_depth) {
      // too many elements on the stack
      printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
      state->print();
      assert(false, "error");
      return false;
    }
  } else {
    // expected stack depth is stack_depth
    if (d != stack_depth) {
      // wrong stack depth
      printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
      state->print();
      assert(false, "error");
      return false;
    }
  }
  // everything is cool
  return true;
}

void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
  if (!VerifyFPU) return;
  push_CPU_state();
  push(rsp); // pass CPU state
  ExternalAddress msg((address) s);
  // pass message string s
  pushptr(msg.addr(), noreg);
  push(stack_depth); // pass stack depth
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
  addptr(rsp, 3 * wordSize); // discard arguments
  // check for error
  { Label L;
    testl(rax, rax);
    jcc(Assembler::notZero, L);
    int3(); // break if error condition
    bind(L);
  }
  pop_CPU_state();
}
#endif // !_LP64

void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) {
  // Either restore the MXCSR register after returning from the JNI call
  // or verify that it wasn't changed (with -Xcheck:jni flag).
  if (VM_Version::supports_sse()) {
    if (RestoreMXCSROnJNICalls) {
      ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
    } else if (CheckJNICalls) {
      call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
    }
  }
  // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
  vzeroupper();

#ifndef _LP64
  // Either restore the x87 floating point control word after returning
  // from the JNI call or verify that it wasn't changed.
  if (CheckJNICalls) {
    call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
  }
#endif // !_LP64
}

// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
  assert_different_registers(result, tmp);

  // Only 64 bit platforms support GCs that require a tmp register
  // Only IN_HEAP loads require a thread_tmp register
  // OopHandle::resolve is an indirection like jobject.
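  // Conceptually (a sketch, not the emitted code):
  //
  //   oop resolve(oop* handle) { return *handle; } // one dependent load
  //
  // so the resolve below is a single IN_NATIVE load through 'result', in place.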
5596 access_load_at(T_OBJECT, IN_NATIVE, 5597 result, Address(result, 0), tmp, /*tmp_thread*/noreg); 5598 } 5599 5600 // ((WeakHandle)result).resolve(); 5601 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) { 5602 assert_different_registers(rresult, rtmp); 5603 Label resolved; 5604 5605 // A null weak handle resolves to null. 5606 cmpptr(rresult, 0); 5607 jcc(Assembler::equal, resolved); 5608 5609 // Only 64 bit platforms support GCs that require a tmp register 5610 // Only IN_HEAP loads require a thread_tmp register 5611 // WeakHandle::resolve is an indirection like jweak. 5612 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 5613 rresult, Address(rresult, 0), rtmp, /*tmp_thread*/noreg); 5614 bind(resolved); 5615 } 5616 5617 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) { 5618 // get mirror 5619 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 5620 load_method_holder(mirror, method); 5621 movptr(mirror, Address(mirror, mirror_offset)); 5622 resolve_oop_handle(mirror, tmp); 5623 } 5624 5625 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 5626 load_method_holder(rresult, rmethod); 5627 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 5628 } 5629 5630 void MacroAssembler::load_method_holder(Register holder, Register method) { 5631 movptr(holder, Address(method, Method::const_offset())); // ConstMethod* 5632 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 5633 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 5634 } 5635 5636 void MacroAssembler::load_metadata(Register dst, Register src) { 5637 if (UseCompressedClassPointers) { 5638 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5639 } else { 5640 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5641 } 5642 } 5643 5644 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { 5645 assert_different_registers(src, tmp); 5646 assert_different_registers(dst, tmp); 5647 #ifdef _LP64 5648 if (UseCompressedClassPointers) { 5649 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5650 decode_klass_not_null(dst, tmp); 5651 } else 5652 #endif 5653 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5654 } 5655 5656 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) { 5657 load_klass(dst, src, tmp); 5658 movptr(dst, Address(dst, Klass::prototype_header_offset())); 5659 } 5660 5661 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { 5662 assert_different_registers(src, tmp); 5663 assert_different_registers(dst, tmp); 5664 #ifdef _LP64 5665 if (UseCompressedClassPointers) { 5666 encode_klass_not_null(src, tmp); 5667 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); 5668 } else 5669 #endif 5670 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src); 5671 } 5672 5673 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, 5674 Register tmp1, Register thread_tmp) { 5675 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5676 decorators = AccessInternal::decorator_fixup(decorators, type); 5677 bool as_raw = (decorators & AS_RAW) != 0; 5678 if (as_raw) { 5679 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp); 5680 } else { 5681 bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp); 5682 } 5683 } 
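// A note on the AS_RAW split above: the qualified call binds statically to the
// base BarrierSetAssembler and emits a plain load with no GC barriers, while
// the unqualified call dispatches virtually to the active collector's
// assembler, which may wrap the load in collector-specific barrier code:
//
//   bs->BarrierSetAssembler::load_at(...); // non-virtual: raw access
//   bs->load_at(...);                      // virtual: GC-aware access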
5684 5685 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, 5686 Register tmp1, Register tmp2, Register tmp3) { 5687 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5688 decorators = AccessInternal::decorator_fixup(decorators, type); 5689 bool as_raw = (decorators & AS_RAW) != 0; 5690 if (as_raw) { 5691 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5692 } else { 5693 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5694 } 5695 } 5696 5697 void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst, 5698 Register inline_klass) { 5699 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5700 bs->value_copy(this, decorators, src, dst, inline_klass); 5701 } 5702 5703 void MacroAssembler::first_field_offset(Register inline_klass, Register offset) { 5704 movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset())); 5705 movl(offset, Address(offset, InlineKlass::first_field_offset_offset())); 5706 } 5707 5708 void MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) { 5709 // ((address) (void*) o) + vk->first_field_offset(); 5710 Register offset = (data == oop) ? rscratch1 : data; 5711 first_field_offset(inline_klass, offset); 5712 if (data == oop) { 5713 addptr(data, offset); 5714 } else { 5715 lea(data, Address(oop, offset)); 5716 } 5717 } 5718 5719 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass, 5720 Register index, Register data) { 5721 assert(index != rcx, "index needs to shift by rcx"); 5722 assert_different_registers(array, array_klass, index); 5723 assert_different_registers(rcx, array, index); 5724 5725 // array->base() + (index << Klass::layout_helper_log2_element_size(lh)); 5726 movl(rcx, Address(array_klass, Klass::layout_helper_offset())); 5727 5728 // Klass::layout_helper_log2_element_size(lh) 5729 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask; 5730 shrl(rcx, Klass::_lh_log2_element_size_shift); 5731 andl(rcx, Klass::_lh_log2_element_size_mask); 5732 shlptr(index); // index << rcx 5733 5734 lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT))); 5735 } 5736 5737 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 5738 Register thread_tmp, DecoratorSet decorators) { 5739 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp); 5740 } 5741 5742 // Doesn't do verification, generates fixed size code 5743 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 5744 Register thread_tmp, DecoratorSet decorators) { 5745 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp); 5746 } 5747 5748 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5749 Register tmp2, Register tmp3, DecoratorSet decorators) { 5750 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5751 } 5752 5753 // Used for storing nulls. 
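// Passing noreg as the value register is the conventional encoding here: the
// barrier-set assembler is expected to turn a noreg source into a store of
// NULL_WORD (or a zero narrow oop) at 'dst'.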
5754 void MacroAssembler::store_heap_oop_null(Address dst) { 5755 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5756 } 5757 5758 #ifdef _LP64 5759 void MacroAssembler::store_klass_gap(Register dst, Register src) { 5760 if (UseCompressedClassPointers) { 5761 // Store to klass gap in destination 5762 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); 5763 } 5764 } 5765 5766 #ifdef ASSERT 5767 void MacroAssembler::verify_heapbase(const char* msg) { 5768 assert (UseCompressedOops, "should be compressed"); 5769 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5770 if (CheckCompressedOops) { 5771 Label ok; 5772 ExternalAddress src2(CompressedOops::ptrs_base_addr()); 5773 const bool is_src2_reachable = reachable(src2); 5774 if (!is_src2_reachable) { 5775 push(rscratch1); // cmpptr trashes rscratch1 5776 } 5777 cmpptr(r12_heapbase, src2, rscratch1); 5778 jcc(Assembler::equal, ok); 5779 STOP(msg); 5780 bind(ok); 5781 if (!is_src2_reachable) { 5782 pop(rscratch1); 5783 } 5784 } 5785 } 5786 #endif 5787 5788 // Algorithm must match oop.inline.hpp encode_heap_oop. 5789 void MacroAssembler::encode_heap_oop(Register r) { 5790 #ifdef ASSERT 5791 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 5792 #endif 5793 verify_oop_msg(r, "broken oop in encode_heap_oop"); 5794 if (CompressedOops::base() == nullptr) { 5795 if (CompressedOops::shift() != 0) { 5796 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5797 shrq(r, LogMinObjAlignmentInBytes); 5798 } 5799 return; 5800 } 5801 testq(r, r); 5802 cmovq(Assembler::equal, r, r12_heapbase); 5803 subq(r, r12_heapbase); 5804 shrq(r, LogMinObjAlignmentInBytes); 5805 } 5806 5807 void MacroAssembler::encode_heap_oop_not_null(Register r) { 5808 #ifdef ASSERT 5809 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 5810 if (CheckCompressedOops) { 5811 Label ok; 5812 testq(r, r); 5813 jcc(Assembler::notEqual, ok); 5814 STOP("null oop passed to encode_heap_oop_not_null"); 5815 bind(ok); 5816 } 5817 #endif 5818 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 5819 if (CompressedOops::base() != nullptr) { 5820 subq(r, r12_heapbase); 5821 } 5822 if (CompressedOops::shift() != 0) { 5823 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5824 shrq(r, LogMinObjAlignmentInBytes); 5825 } 5826 } 5827 5828 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 5829 #ifdef ASSERT 5830 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 5831 if (CheckCompressedOops) { 5832 Label ok; 5833 testq(src, src); 5834 jcc(Assembler::notEqual, ok); 5835 STOP("null oop passed to encode_heap_oop_not_null2"); 5836 bind(ok); 5837 } 5838 #endif 5839 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 5840 if (dst != src) { 5841 movq(dst, src); 5842 } 5843 if (CompressedOops::base() != nullptr) { 5844 subq(dst, r12_heapbase); 5845 } 5846 if (CompressedOops::shift() != 0) { 5847 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5848 shrq(dst, LogMinObjAlignmentInBytes); 5849 } 5850 } 5851 5852 void MacroAssembler::decode_heap_oop(Register r) { 5853 #ifdef ASSERT 5854 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 5855 #endif 5856 if (CompressedOops::base() == nullptr) { 5857 if (CompressedOops::shift() != 0) { 5858 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg 
wrong"); 5859 shlq(r, LogMinObjAlignmentInBytes); 5860 } 5861 } else { 5862 Label done; 5863 shlq(r, LogMinObjAlignmentInBytes); 5864 jccb(Assembler::equal, done); 5865 addq(r, r12_heapbase); 5866 bind(done); 5867 } 5868 verify_oop_msg(r, "broken oop in decode_heap_oop"); 5869 } 5870 5871 void MacroAssembler::decode_heap_oop_not_null(Register r) { 5872 // Note: it will change flags 5873 assert (UseCompressedOops, "should only be used for compressed headers"); 5874 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5875 // Cannot assert, unverified entry point counts instructions (see .ad file) 5876 // vtableStubs also counts instructions in pd_code_size_limit. 5877 // Also do not verify_oop as this is called by verify_oop. 5878 if (CompressedOops::shift() != 0) { 5879 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5880 shlq(r, LogMinObjAlignmentInBytes); 5881 if (CompressedOops::base() != nullptr) { 5882 addq(r, r12_heapbase); 5883 } 5884 } else { 5885 assert (CompressedOops::base() == nullptr, "sanity"); 5886 } 5887 } 5888 5889 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 5890 // Note: it will change flags 5891 assert (UseCompressedOops, "should only be used for compressed headers"); 5892 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5893 // Cannot assert, unverified entry point counts instructions (see .ad file) 5894 // vtableStubs also counts instructions in pd_code_size_limit. 5895 // Also do not verify_oop as this is called by verify_oop. 5896 if (CompressedOops::shift() != 0) { 5897 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5898 if (LogMinObjAlignmentInBytes == Address::times_8) { 5899 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); 5900 } else { 5901 if (dst != src) { 5902 movq(dst, src); 5903 } 5904 shlq(dst, LogMinObjAlignmentInBytes); 5905 if (CompressedOops::base() != nullptr) { 5906 addq(dst, r12_heapbase); 5907 } 5908 } 5909 } else { 5910 assert (CompressedOops::base() == nullptr, "sanity"); 5911 if (dst != src) { 5912 movq(dst, src); 5913 } 5914 } 5915 } 5916 5917 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { 5918 assert_different_registers(r, tmp); 5919 if (CompressedKlassPointers::base() != nullptr) { 5920 mov64(tmp, (int64_t)CompressedKlassPointers::base()); 5921 subq(r, tmp); 5922 } 5923 if (CompressedKlassPointers::shift() != 0) { 5924 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5925 shrq(r, LogKlassAlignmentInBytes); 5926 } 5927 } 5928 5929 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) { 5930 assert_different_registers(src, dst); 5931 if (CompressedKlassPointers::base() != nullptr) { 5932 mov64(dst, -(int64_t)CompressedKlassPointers::base()); 5933 addq(dst, src); 5934 } else { 5935 movptr(dst, src); 5936 } 5937 if (CompressedKlassPointers::shift() != 0) { 5938 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5939 shrq(dst, LogKlassAlignmentInBytes); 5940 } 5941 } 5942 5943 void MacroAssembler::decode_klass_not_null(Register r, Register tmp) { 5944 assert_different_registers(r, tmp); 5945 // Note: it will change flags 5946 assert(UseCompressedClassPointers, "should only be used for compressed headers"); 5947 // Cannot assert, unverified entry point counts instructions (see .ad file) 5948 // vtableStubs also counts instructions in pd_code_size_limit. 
5949 // Also do not verify_oop as this is called by verify_oop. 5950 if (CompressedKlassPointers::shift() != 0) { 5951 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5952 shlq(r, LogKlassAlignmentInBytes); 5953 } 5954 if (CompressedKlassPointers::base() != nullptr) { 5955 mov64(tmp, (int64_t)CompressedKlassPointers::base()); 5956 addq(r, tmp); 5957 } 5958 } 5959 5960 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) { 5961 assert_different_registers(src, dst); 5962 // Note: it will change flags 5963 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5964 // Cannot assert, unverified entry point counts instructions (see .ad file) 5965 // vtableStubs also counts instructions in pd_code_size_limit. 5966 // Also do not verify_oop as this is called by verify_oop. 5967 5968 if (CompressedKlassPointers::base() == nullptr && 5969 CompressedKlassPointers::shift() == 0) { 5970 // The best case scenario is that there is no base or shift. Then it is already 5971 // a pointer that needs nothing but a register rename. 5972 movl(dst, src); 5973 } else { 5974 if (CompressedKlassPointers::base() != nullptr) { 5975 mov64(dst, (int64_t)CompressedKlassPointers::base()); 5976 } else { 5977 xorq(dst, dst); 5978 } 5979 if (CompressedKlassPointers::shift() != 0) { 5980 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5981 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); 5982 leaq(dst, Address(dst, src, Address::times_8, 0)); 5983 } else { 5984 addq(dst, src); 5985 } 5986 } 5987 } 5988 5989 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5990 assert (UseCompressedOops, "should only be used for compressed headers"); 5991 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5992 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5993 int oop_index = oop_recorder()->find_index(obj); 5994 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5995 mov_narrow_oop(dst, oop_index, rspec); 5996 } 5997 5998 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) { 5999 assert (UseCompressedOops, "should only be used for compressed headers"); 6000 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6001 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6002 int oop_index = oop_recorder()->find_index(obj); 6003 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6004 mov_narrow_oop(dst, oop_index, rspec); 6005 } 6006 6007 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 6008 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6009 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6010 int klass_index = oop_recorder()->find_index(k); 6011 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6012 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6013 } 6014 6015 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) { 6016 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6017 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6018 int klass_index = oop_recorder()->find_index(k); 6019 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6020 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6021 } 6022 6023 void 
MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) { 6024 assert (UseCompressedOops, "should only be used for compressed headers"); 6025 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6026 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6027 int oop_index = oop_recorder()->find_index(obj); 6028 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6029 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 6030 } 6031 6032 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) { 6033 assert (UseCompressedOops, "should only be used for compressed headers"); 6034 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6035 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6036 int oop_index = oop_recorder()->find_index(obj); 6037 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6038 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 6039 } 6040 6041 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) { 6042 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6043 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6044 int klass_index = oop_recorder()->find_index(k); 6045 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6046 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6047 } 6048 6049 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { 6050 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6051 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6052 int klass_index = oop_recorder()->find_index(k); 6053 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6054 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6055 } 6056 6057 void MacroAssembler::reinit_heapbase() { 6058 if (UseCompressedOops) { 6059 if (Universe::heap() != nullptr) { 6060 if (CompressedOops::base() == nullptr) { 6061 MacroAssembler::xorptr(r12_heapbase, r12_heapbase); 6062 } else { 6063 mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base()); 6064 } 6065 } else { 6066 movptr(r12_heapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 6067 } 6068 } 6069 } 6070 6071 #endif // _LP64 6072 6073 #if COMPILER2_OR_JVMCI 6074 6075 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers 6076 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) { 6077 // cnt - number of qwords (8-byte words). 6078 // base - start address, qword aligned. 
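  // Strategy, in outline: broadcast 'val' across xtmp once, then store it out
  // in 64-byte chunks per loop iteration (one ZMM store, or the equivalent
  // YMM/XMM store sequence, depending on MaxVectorSize), and finish the
  // sub-64-byte tail with masked stores where AVX-512VL is available, else
  // with plain 8-byte stores.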
  Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
  bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
  if (use64byteVector) {
    evpbroadcastq(xtmp, val, AVX_512bit);
  } else if (MaxVectorSize >= 32) {
    movdq(xtmp, val);
    punpcklqdq(xtmp, xtmp);
    vinserti128_high(xtmp, xtmp);
  } else {
    movdq(xtmp, val);
    punpcklqdq(xtmp, xtmp);
  }
  jmp(L_zero_64_bytes);

  BIND(L_loop);
  if (MaxVectorSize >= 32) {
    fill64(base, 0, xtmp, use64byteVector);
  } else {
    movdqu(Address(base,  0), xtmp);
    movdqu(Address(base, 16), xtmp);
    movdqu(Address(base, 32), xtmp);
    movdqu(Address(base, 48), xtmp);
  }
  addptr(base, 64);

  BIND(L_zero_64_bytes);
  subptr(cnt, 8);
  jccb(Assembler::greaterEqual, L_loop);

  // Copy trailing 64 bytes
  if (use64byteVector) {
    addptr(cnt, 8);
    jccb(Assembler::equal, L_end);
    fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
    jmp(L_end);
  } else {
    addptr(cnt, 4);
    jccb(Assembler::less, L_tail);
    if (MaxVectorSize >= 32) {
      vmovdqu(Address(base, 0), xtmp);
    } else {
      movdqu(Address(base,  0), xtmp);
      movdqu(Address(base, 16), xtmp);
    }
  }
  addptr(base, 32);
  subptr(cnt, 4);

  BIND(L_tail);
  addptr(cnt, 4);
  jccb(Assembler::lessEqual, L_end);
  if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
    fill32_masked(3, base, 0, xtmp, mask, cnt, val);
  } else {
    decrement(cnt);

    BIND(L_sloop);
    movq(Address(base, 0), xtmp);
    addptr(base, 8);
    decrement(cnt);
    jccb(Assembler::greaterEqual, L_sloop);
  }
  BIND(L_end);
}

int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
  assert(InlineTypeReturnedAsFields, "should only be used when inline types are returned as fields");
  // An inline type might be returned. If fields are in registers we
  // need to allocate an inline type instance and initialize it with
  // the value of the fields.
  Label skip;
  // We only need a new buffered inline type if one was not returned already:
  // the low bit of rax is set when the fields are returned in registers.
  testptr(rax, 1);
  jcc(Assembler::zero, skip);
  int call_offset = -1;

#ifdef _LP64
  // The following code is similar to allocate_instance but has some slight differences,
  // e.g. the object size is never zero and is sometimes a constant, and storing the klass
  // pointer after allocation is unnecessary if vk != nullptr. allocate_instance is not
  // aware of these differences.
  Label slow_case;
  // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
  mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation fails
  if (vk != nullptr) {
    // Called from C1, where the return type is statically known.
    movptr(rbx, (intptr_t)vk->get_InlineKlass());
    jint obj_size = vk->layout_helper();
    assert(obj_size != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
    if (UseTLAB) {
      tlab_allocate(r15_thread, rax, noreg, obj_size, r13, r14, slow_case);
    } else {
      jmp(slow_case);
    }
  } else {
    // Call from interpreter.
RAX contains ((the InlineKlass* of the return type) | 0x01) 6174 mov(rbx, rax); 6175 andptr(rbx, -2); 6176 movl(r14, Address(rbx, Klass::layout_helper_offset())); 6177 if (UseTLAB) { 6178 tlab_allocate(r15_thread, rax, r14, 0, r13, r14, slow_case); 6179 } else { 6180 jmp(slow_case); 6181 } 6182 } 6183 if (UseTLAB) { 6184 // 2. Initialize buffered inline instance header 6185 Register buffer_obj = rax; 6186 movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value()); 6187 xorl(r13, r13); 6188 store_klass_gap(buffer_obj, r13); 6189 if (vk == nullptr) { 6190 // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only). 6191 mov(r13, rbx); 6192 } 6193 store_klass(buffer_obj, rbx, rscratch1); 6194 // 3. Initialize its fields with an inline class specific handler 6195 if (vk != nullptr) { 6196 call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint. 6197 } else { 6198 movptr(rbx, Address(r13, InstanceKlass::adr_inlineklass_fixed_block_offset())); 6199 movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset())); 6200 call(rbx); 6201 } 6202 jmp(skip); 6203 } 6204 bind(slow_case); 6205 // We failed to allocate a new inline type, fall back to a runtime 6206 // call. Some oop field may be live in some registers but we can't 6207 // tell. That runtime call will take care of preserving them 6208 // across a GC if there's one. 6209 mov(rax, rscratch1); 6210 #endif 6211 6212 if (from_interpreter) { 6213 super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf()); 6214 } else { 6215 call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf())); 6216 call_offset = offset(); 6217 } 6218 6219 bind(skip); 6220 return call_offset; 6221 } 6222 6223 // Move a value between registers/stack slots and update the reg_state 6224 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) { 6225 assert(from->is_valid() && to->is_valid(), "source and destination must be valid"); 6226 if (reg_state[to->value()] == reg_written) { 6227 return true; // Already written 6228 } 6229 if (from != to && bt != T_VOID) { 6230 if (reg_state[to->value()] == reg_readonly) { 6231 return false; // Not yet writable 6232 } 6233 if (from->is_reg()) { 6234 if (to->is_reg()) { 6235 if (from->is_XMMRegister()) { 6236 if (bt == T_DOUBLE) { 6237 movdbl(to->as_XMMRegister(), from->as_XMMRegister()); 6238 } else { 6239 assert(bt == T_FLOAT, "must be float"); 6240 movflt(to->as_XMMRegister(), from->as_XMMRegister()); 6241 } 6242 } else { 6243 movq(to->as_Register(), from->as_Register()); 6244 } 6245 } else { 6246 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6247 Address to_addr = Address(rsp, st_off); 6248 if (from->is_XMMRegister()) { 6249 if (bt == T_DOUBLE) { 6250 movdbl(to_addr, from->as_XMMRegister()); 6251 } else { 6252 assert(bt == T_FLOAT, "must be float"); 6253 movflt(to_addr, from->as_XMMRegister()); 6254 } 6255 } else { 6256 movq(to_addr, from->as_Register()); 6257 } 6258 } 6259 } else { 6260 Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize); 6261 if (to->is_reg()) { 6262 if (to->is_XMMRegister()) { 6263 if (bt == T_DOUBLE) { 6264 movdbl(to->as_XMMRegister(), from_addr); 6265 } else { 6266 assert(bt == T_FLOAT, "must be float"); 6267 movflt(to->as_XMMRegister(), from_addr); 6268 } 6269 } else { 6270 movq(to->as_Register(), from_addr); 6271 } 6272 } else { 6273 int st_off = to->reg2stack() * 
VMRegImpl::stack_slot_size + wordSize;
        movq(r13, from_addr);
        movq(Address(rsp, st_off), r13);
      }
    }
  }
  // Update register states
  reg_state[from->value()] = reg_writable;
  reg_state[to->value()] = reg_written;
  return true;
}

// Calculate the extra stack space required for packing or unpacking inline
// args and adjust the stack pointer
int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
  // Two additional slots to account for the return address
  int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size;
  sp_inc = align_up(sp_inc, StackAlignmentInBytes);
  // Save the return address, adjust the stack (make sure it is properly
  // 16-byte aligned) and copy the return address to the new top of the stack.
  // The stack will be repaired on return (see MacroAssembler::remove_frame).
  assert(sp_inc > 0, "sanity");
  pop(r13);
  subptr(rsp, sp_inc);
  push(r13);
  return sp_inc;
}

// Read all fields from an inline type buffer and store the field values in registers/stack slots.
bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
                                          VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
                                          RegState reg_state[]) {
  assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
  assert(from->is_valid(), "source must be valid");
  bool progress = false;
#ifdef ASSERT
  const int start_offset = offset();
#endif

  Label L_null, L_notNull;
  // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
  Register tmp1 = r10;
  Register tmp2 = r13;
  Register fromReg = noreg;
  ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
  bool done = true;
  bool mark_done = true;
  VMReg toReg;
  BasicType bt;
  // Check if the argument requires a null check
  bool null_check = false;
  VMReg nullCheckReg;
  while (stream.next(nullCheckReg, bt)) {
    if (sig->at(stream.sig_index())._offset == -1) {
      null_check = true;
      break;
    }
  }
  stream.reset(sig_index, to_index);
  while (stream.next(toReg, bt)) {
    assert(toReg->is_valid(), "destination must be valid");
    int idx = (int)toReg->value();
    if (reg_state[idx] == reg_readonly) {
      if (idx != from->value()) {
        mark_done = false;
      }
      done = false;
      continue;
    } else if (reg_state[idx] == reg_written) {
      continue;
    }
    assert(reg_state[idx] == reg_writable, "must be writable");
    reg_state[idx] = reg_written;
    progress = true;

    if (fromReg == noreg) {
      if (from->is_reg()) {
        fromReg = from->as_Register();
      } else {
        int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        movq(tmp1, Address(rsp, st_off));
        fromReg = tmp1;
      }
      if (null_check) {
        // Nullable inline type argument, emit null check
        testptr(fromReg, fromReg);
        jcc(Assembler::zero, L_null);
      }
    }
    int off = sig->at(stream.sig_index())._offset;
    if (off == -1) {
      assert(null_check, "missing null check");
      if (toReg->is_stack()) {
        int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        movq(Address(rsp, st_off), 1);
      } else {
        movq(toReg->as_Register(), 1);
      }
      continue;
    }
    assert(off > 0, "offset in object should be positive");
    Address fromAddr =
Address(fromReg, off); 6375 if (!toReg->is_XMMRegister()) { 6376 Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register(); 6377 if (is_reference_type(bt)) { 6378 load_heap_oop(dst, fromAddr); 6379 } else { 6380 bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN); 6381 load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed); 6382 } 6383 if (toReg->is_stack()) { 6384 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6385 movq(Address(rsp, st_off), dst); 6386 } 6387 } else if (bt == T_DOUBLE) { 6388 movdbl(toReg->as_XMMRegister(), fromAddr); 6389 } else { 6390 assert(bt == T_FLOAT, "must be float"); 6391 movflt(toReg->as_XMMRegister(), fromAddr); 6392 } 6393 } 6394 if (progress && null_check) { 6395 if (done) { 6396 jmp(L_notNull); 6397 bind(L_null); 6398 // Set IsInit field to zero to signal that the argument is null. 6399 // Also set all oop fields to zero to make the GC happy. 6400 stream.reset(sig_index, to_index); 6401 while (stream.next(toReg, bt)) { 6402 if (sig->at(stream.sig_index())._offset == -1 || 6403 bt == T_OBJECT || bt == T_ARRAY) { 6404 if (toReg->is_stack()) { 6405 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6406 movq(Address(rsp, st_off), 0); 6407 } else { 6408 xorq(toReg->as_Register(), toReg->as_Register()); 6409 } 6410 } 6411 } 6412 bind(L_notNull); 6413 } else { 6414 bind(L_null); 6415 } 6416 } 6417 6418 sig_index = stream.sig_index(); 6419 to_index = stream.regs_index(); 6420 6421 if (mark_done && reg_state[from->value()] != reg_written) { 6422 // This is okay because no one else will write to that slot 6423 reg_state[from->value()] = reg_writable; 6424 } 6425 from_index--; 6426 assert(progress || (start_offset == offset()), "should not emit code"); 6427 return done; 6428 } 6429 6430 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index, 6431 VMRegPair* from, int from_count, int& from_index, VMReg to, 6432 RegState reg_state[], Register val_array) { 6433 assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter"); 6434 assert(to->is_valid(), "destination must be valid"); 6435 6436 if (reg_state[to->value()] == reg_written) { 6437 skip_unpacked_fields(sig, sig_index, from, from_count, from_index); 6438 return true; // Already written 6439 } 6440 6441 // TODO 8284443 Isn't it an issue if below code uses r14 as tmp when it contains a spilled value? 6442 // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for). 6443 Register val_obj_tmp = r11; 6444 Register from_reg_tmp = r14; 6445 Register tmp1 = r10; 6446 Register tmp2 = r13; 6447 Register tmp3 = rbx; 6448 Register val_obj = to->is_stack() ? 
val_obj_tmp : to->as_Register(); 6449 6450 assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array); 6451 6452 if (reg_state[to->value()] == reg_readonly) { 6453 if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) { 6454 skip_unpacked_fields(sig, sig_index, from, from_count, from_index); 6455 return false; // Not yet writable 6456 } 6457 val_obj = val_obj_tmp; 6458 } 6459 6460 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT); 6461 load_heap_oop(val_obj, Address(val_array, index)); 6462 6463 ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index); 6464 VMReg fromReg; 6465 BasicType bt; 6466 Label L_null; 6467 while (stream.next(fromReg, bt)) { 6468 assert(fromReg->is_valid(), "source must be valid"); 6469 reg_state[fromReg->value()] = reg_writable; 6470 6471 int off = sig->at(stream.sig_index())._offset; 6472 if (off == -1) { 6473 // Nullable inline type argument, emit null check 6474 Label L_notNull; 6475 if (fromReg->is_stack()) { 6476 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6477 testb(Address(rsp, ld_off), 1); 6478 } else { 6479 testb(fromReg->as_Register(), 1); 6480 } 6481 jcc(Assembler::notZero, L_notNull); 6482 movptr(val_obj, 0); 6483 jmp(L_null); 6484 bind(L_notNull); 6485 continue; 6486 } 6487 6488 assert(off > 0, "offset in object should be positive"); 6489 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize; 6490 6491 Address dst(val_obj, off); 6492 if (!fromReg->is_XMMRegister()) { 6493 Register src; 6494 if (fromReg->is_stack()) { 6495 src = from_reg_tmp; 6496 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6497 load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false); 6498 } else { 6499 src = fromReg->as_Register(); 6500 } 6501 assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array); 6502 if (is_reference_type(bt)) { 6503 store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED); 6504 } else { 6505 store_sized_value(dst, src, size_in_bytes); 6506 } 6507 } else if (bt == T_DOUBLE) { 6508 movdbl(dst, fromReg->as_XMMRegister()); 6509 } else { 6510 assert(bt == T_FLOAT, "must be float"); 6511 movflt(dst, fromReg->as_XMMRegister()); 6512 } 6513 } 6514 bind(L_null); 6515 sig_index = stream.sig_index(); 6516 from_index = stream.regs_index(); 6517 6518 assert(reg_state[to->value()] == reg_writable, "must have already been read"); 6519 bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state); 6520 assert(success, "to register must be writeable"); 6521 return true; 6522 } 6523 6524 VMReg MacroAssembler::spill_reg_for(VMReg reg) { 6525 return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg(); 6526 } 6527 6528 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) { 6529 assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); 6530 if (needs_stack_repair) { 6531 movq(rbp, Address(rsp, initial_framesize)); 6532 // The stack increment resides just below the saved rbp 6533 addq(rsp, Address(rsp, initial_framesize - wordSize)); 6534 } else { 6535 if (initial_framesize > 0) { 6536 addq(rsp, initial_framesize); 6537 } 6538 pop(rbp); 6539 } 6540 } 6541 6542 // Clearing constant sized memory using YMM/ZMM registers. 
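// Since 'cnt' (in qwords) is known at compile time, the stores can be laid
// out straight-line: up to 8 64-byte fills are emitted unrolled, larger
// counts get a short counted loop, and the trailing 1..7 qwords are matched
// case by case with plain, 128/256-bit, or k-mask-predicated stores.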
6543 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { 6544 assert(UseAVX > 2 && VM_Version::supports_avx512vlbw(), ""); 6545 bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0); 6546 6547 int vector64_count = (cnt & (~0x7)) >> 3; 6548 cnt = cnt & 0x7; 6549 const int fill64_per_loop = 4; 6550 const int max_unrolled_fill64 = 8; 6551 6552 // 64 byte initialization loop. 6553 vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit); 6554 int start64 = 0; 6555 if (vector64_count > max_unrolled_fill64) { 6556 Label LOOP; 6557 Register index = rtmp; 6558 6559 start64 = vector64_count - (vector64_count % fill64_per_loop); 6560 6561 movl(index, 0); 6562 BIND(LOOP); 6563 for (int i = 0; i < fill64_per_loop; i++) { 6564 fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector); 6565 } 6566 addl(index, fill64_per_loop * 64); 6567 cmpl(index, start64 * 64); 6568 jccb(Assembler::less, LOOP); 6569 } 6570 for (int i = start64; i < vector64_count; i++) { 6571 fill64(base, i * 64, xtmp, use64byteVector); 6572 } 6573 6574 // Clear remaining 64 byte tail. 6575 int disp = vector64_count * 64; 6576 if (cnt) { 6577 switch (cnt) { 6578 case 1: 6579 movq(Address(base, disp), xtmp); 6580 break; 6581 case 2: 6582 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit); 6583 break; 6584 case 3: 6585 movl(rtmp, 0x7); 6586 kmovwl(mask, rtmp); 6587 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit); 6588 break; 6589 case 4: 6590 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 6591 break; 6592 case 5: 6593 if (use64byteVector) { 6594 movl(rtmp, 0x1F); 6595 kmovwl(mask, rtmp); 6596 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 6597 } else { 6598 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 6599 movq(Address(base, disp + 32), xtmp); 6600 } 6601 break; 6602 case 6: 6603 if (use64byteVector) { 6604 movl(rtmp, 0x3F); 6605 kmovwl(mask, rtmp); 6606 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 6607 } else { 6608 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 6609 evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit); 6610 } 6611 break; 6612 case 7: 6613 if (use64byteVector) { 6614 movl(rtmp, 0x7F); 6615 kmovwl(mask, rtmp); 6616 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 6617 } else { 6618 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 6619 movl(rtmp, 0x7); 6620 kmovwl(mask, rtmp); 6621 evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit); 6622 } 6623 break; 6624 default: 6625 fatal("Unexpected length : %d\n",cnt); 6626 break; 6627 } 6628 } 6629 } 6630 6631 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, 6632 bool is_large, bool word_copy_only, KRegister mask) { 6633 // cnt - number of qwords (8-byte words). 6634 // base - start address, qword aligned. 
  // is_large - if optimizers know cnt is larger than InitArrayShortSize
  assert(base==rdi, "base register must be edi for rep stos");
  assert(val==rax,  "val register must be eax for rep stos");
  assert(cnt==rcx,  "cnt register must be ecx for rep stos");
  assert(InitArrayShortSize % BytesPerLong == 0,
    "InitArrayShortSize should be a multiple of BytesPerLong");

  Label DONE;

  if (!is_large) {
    Label LOOP, LONG;
    cmpptr(cnt, InitArrayShortSize/BytesPerLong);
    jccb(Assembler::greater, LONG);

    NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM

    decrement(cnt);
    jccb(Assembler::negative, DONE); // Zero length

    // Use individual pointer-sized stores for small counts:
    BIND(LOOP);
    movptr(Address(base, cnt, Address::times_ptr), val);
    decrement(cnt);
    jccb(Assembler::greaterEqual, LOOP);
    jmpb(DONE);

    BIND(LONG);
  }

  // Use longer rep-prefixed ops for non-small counts:
  if (UseFastStosb && !word_copy_only) {
    shlptr(cnt, 3); // convert to number of bytes
    rep_stosb();
  } else if (UseXMMForObjInit) {
    xmm_clear_mem(base, cnt, val, xtmp, mask);
  } else {
    NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
    rep_stos();
  }

  BIND(DONE);
}

#endif //COMPILER2_OR_JVMCI


void MacroAssembler::generate_fill(BasicType t, bool aligned,
                                   Register to, Register value, Register count,
                                   Register rtmp, XMMRegister xtmp) {
  ShortBranchVerifier sbv(this);
  assert_different_registers(to, value, count, rtmp);
  Label L_exit;
  Label L_fill_2_bytes, L_fill_4_bytes;

#if defined(COMPILER2) && defined(_LP64)
  if (MaxVectorSize >= 32 &&
      VM_Version::supports_avx512vlbw() &&
      VM_Version::supports_bmi2()) {
    generate_fill_avx3(t, to, value, count, rtmp, xtmp);
    return;
  }
#endif

  int shift = -1;
  switch (t) {
    case T_BYTE:
      shift = 2;
      break;
    case T_SHORT:
      shift = 1;
      break;
    case T_INT:
      shift = 0;
      break;
    default: ShouldNotReachHere();
  }

  if (t == T_BYTE) {
    andl(value, 0xff);
    movl(rtmp, value);
    shll(rtmp, 8);
    orl(value, rtmp);
  }
  if (t == T_SHORT) {
    andl(value, 0xffff);
  }
  if (t == T_BYTE || t == T_SHORT) {
    movl(rtmp, value);
    shll(rtmp, 16);
    orl(value, rtmp);
  }

  cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
  jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
  if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
    Label L_skip_align2;
    // align source address at 4 bytes address boundary
    if (t == T_BYTE) {
      Label L_skip_align1;
      // One byte misalignment happens only for byte arrays
      testptr(to, 1);
      jccb(Assembler::zero, L_skip_align1);
      movb(Address(to, 0), value);
      increment(to);
      decrement(count);
      BIND(L_skip_align1);
    }
    // Two bytes misalignment happens only for byte and short (char) arrays
    testptr(to, 2);
    jccb(Assembler::zero, L_skip_align2);
    movw(Address(to, 0), value);
    addptr(to, 2);
    subl(count, 1<<(shift-1));
    BIND(L_skip_align2);
  }
  if (UseSSE < 2) {
    Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
    // Fill 32-byte chunks
    subl(count, 8 << shift);
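    // count just dropped by 32 bytes' worth of elements; if it went
    // negative, the branch below falls back to the 8-byte tail loop.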
jcc(Assembler::less, L_check_fill_8_bytes); 6755 align(16); 6756 6757 BIND(L_fill_32_bytes_loop); 6758 6759 for (int i = 0; i < 32; i += 4) { 6760 movl(Address(to, i), value); 6761 } 6762 6763 addptr(to, 32); 6764 subl(count, 8 << shift); 6765 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop); 6766 BIND(L_check_fill_8_bytes); 6767 addl(count, 8 << shift); 6768 jccb(Assembler::zero, L_exit); 6769 jmpb(L_fill_8_bytes); 6770 6771 // 6772 // length is too short, just fill qwords 6773 // 6774 BIND(L_fill_8_bytes_loop); 6775 movl(Address(to, 0), value); 6776 movl(Address(to, 4), value); 6777 addptr(to, 8); 6778 BIND(L_fill_8_bytes); 6779 subl(count, 1 << (shift + 1)); 6780 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop); 6781 // fall through to fill 4 bytes 6782 } else { 6783 Label L_fill_32_bytes; 6784 if (!UseUnalignedLoadStores) { 6785 // align to 8 bytes, we know we are 4 byte aligned to start 6786 testptr(to, 4); 6787 jccb(Assembler::zero, L_fill_32_bytes); 6788 movl(Address(to, 0), value); 6789 addptr(to, 4); 6790 subl(count, 1<<shift); 6791 } 6792 BIND(L_fill_32_bytes); 6793 { 6794 assert( UseSSE >= 2, "supported cpu only" ); 6795 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes; 6796 movdl(xtmp, value); 6797 if (UseAVX >= 2 && UseUnalignedLoadStores) { 6798 Label L_check_fill_32_bytes; 6799 if (UseAVX > 2) { 6800 // Fill 64-byte chunks 6801 Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2; 6802 6803 // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2 6804 cmpl(count, VM_Version::avx3_threshold()); 6805 jccb(Assembler::below, L_check_fill_64_bytes_avx2); 6806 6807 vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit); 6808 6809 subl(count, 16 << shift); 6810 jccb(Assembler::less, L_check_fill_32_bytes); 6811 align(16); 6812 6813 BIND(L_fill_64_bytes_loop_avx3); 6814 evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit); 6815 addptr(to, 64); 6816 subl(count, 16 << shift); 6817 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3); 6818 jmpb(L_check_fill_32_bytes); 6819 6820 BIND(L_check_fill_64_bytes_avx2); 6821 } 6822 // Fill 64-byte chunks 6823 Label L_fill_64_bytes_loop; 6824 vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit); 6825 6826 subl(count, 16 << shift); 6827 jcc(Assembler::less, L_check_fill_32_bytes); 6828 align(16); 6829 6830 BIND(L_fill_64_bytes_loop); 6831 vmovdqu(Address(to, 0), xtmp); 6832 vmovdqu(Address(to, 32), xtmp); 6833 addptr(to, 64); 6834 subl(count, 16 << shift); 6835 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop); 6836 6837 BIND(L_check_fill_32_bytes); 6838 addl(count, 8 << shift); 6839 jccb(Assembler::less, L_check_fill_8_bytes); 6840 vmovdqu(Address(to, 0), xtmp); 6841 addptr(to, 32); 6842 subl(count, 8 << shift); 6843 6844 BIND(L_check_fill_8_bytes); 6845 // clean upper bits of YMM registers 6846 movdl(xtmp, value); 6847 pshufd(xtmp, xtmp, 0); 6848 } else { 6849 // Fill 32-byte chunks 6850 pshufd(xtmp, xtmp, 0); 6851 6852 subl(count, 8 << shift); 6853 jcc(Assembler::less, L_check_fill_8_bytes); 6854 align(16); 6855 6856 BIND(L_fill_32_bytes_loop); 6857 6858 if (UseUnalignedLoadStores) { 6859 movdqu(Address(to, 0), xtmp); 6860 movdqu(Address(to, 16), xtmp); 6861 } else { 6862 movq(Address(to, 0), xtmp); 6863 movq(Address(to, 8), xtmp); 6864 movq(Address(to, 16), xtmp); 6865 movq(Address(to, 24), xtmp); 6866 } 6867 6868 addptr(to, 32); 6869 subl(count, 8 << shift); 6870 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop); 6871 6872 BIND(L_check_fill_8_bytes); 6873 } 6874 
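      // Fewer than 32 bytes remain: restore the element count and fall
      // into the 8-byte (qword) tail below.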
addl(count, 8 << shift); 6875 jccb(Assembler::zero, L_exit); 6876 jmpb(L_fill_8_bytes); 6877 6878 // 6879 // length is too short, just fill qwords 6880 // 6881 BIND(L_fill_8_bytes_loop); 6882 movq(Address(to, 0), xtmp); 6883 addptr(to, 8); 6884 BIND(L_fill_8_bytes); 6885 subl(count, 1 << (shift + 1)); 6886 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop); 6887 } 6888 } 6889 // fill trailing 4 bytes 6890 BIND(L_fill_4_bytes); 6891 testl(count, 1<<shift); 6892 jccb(Assembler::zero, L_fill_2_bytes); 6893 movl(Address(to, 0), value); 6894 if (t == T_BYTE || t == T_SHORT) { 6895 Label L_fill_byte; 6896 addptr(to, 4); 6897 BIND(L_fill_2_bytes); 6898 // fill trailing 2 bytes 6899 testl(count, 1<<(shift-1)); 6900 jccb(Assembler::zero, L_fill_byte); 6901 movw(Address(to, 0), value); 6902 if (t == T_BYTE) { 6903 addptr(to, 2); 6904 BIND(L_fill_byte); 6905 // fill trailing byte 6906 testl(count, 1); 6907 jccb(Assembler::zero, L_exit); 6908 movb(Address(to, 0), value); 6909 } else { 6910 BIND(L_fill_byte); 6911 } 6912 } else { 6913 BIND(L_fill_2_bytes); 6914 } 6915 BIND(L_exit); 6916 } 6917 6918 void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) { 6919 switch(type) { 6920 case T_BYTE: 6921 case T_BOOLEAN: 6922 evpbroadcastb(dst, src, vector_len); 6923 break; 6924 case T_SHORT: 6925 case T_CHAR: 6926 evpbroadcastw(dst, src, vector_len); 6927 break; 6928 case T_INT: 6929 case T_FLOAT: 6930 evpbroadcastd(dst, src, vector_len); 6931 break; 6932 case T_LONG: 6933 case T_DOUBLE: 6934 evpbroadcastq(dst, src, vector_len); 6935 break; 6936 default: 6937 fatal("Unhandled type : %s", type2name(type)); 6938 break; 6939 } 6940 } 6941 6942 // encode char[] to byte[] in ISO_8859_1 or ASCII 6943 //@IntrinsicCandidate 6944 //private static int implEncodeISOArray(byte[] sa, int sp, 6945 //byte[] da, int dp, int len) { 6946 // int i = 0; 6947 // for (; i < len; i++) { 6948 // char c = StringUTF16.getChar(sa, sp++); 6949 // if (c > '\u00FF') 6950 // break; 6951 // da[dp++] = (byte)c; 6952 // } 6953 // return i; 6954 //} 6955 // 6956 //@IntrinsicCandidate 6957 //private static int implEncodeAsciiArray(char[] sa, int sp, 6958 // byte[] da, int dp, int len) { 6959 // int i = 0; 6960 // for (; i < len; i++) { 6961 // char c = sa[sp++]; 6962 // if (c >= '\u0080') 6963 // break; 6964 // da[dp++] = (byte)c; 6965 // } 6966 // return i; 6967 //} 6968 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len, 6969 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 6970 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 6971 Register tmp5, Register result, bool ascii) { 6972 6973 // rsi: src 6974 // rdi: dst 6975 // rdx: len 6976 // rcx: tmp5 6977 // rax: result 6978 ShortBranchVerifier sbv(this); 6979 assert_different_registers(src, dst, len, tmp5, result); 6980 Label L_done, L_copy_1_char, L_copy_1_char_exit; 6981 6982 int mask = ascii ? 0xff80ff80 : 0xff00ff00; 6983 int short_mask = ascii ? 
0xff80 : 0xff00; 6984 6985 // set result 6986 xorl(result, result); 6987 // check for zero length 6988 testl(len, len); 6989 jcc(Assembler::zero, L_done); 6990 6991 movl(result, len); 6992 6993 // Setup pointers 6994 lea(src, Address(src, len, Address::times_2)); // char[] 6995 lea(dst, Address(dst, len, Address::times_1)); // byte[] 6996 negptr(len); 6997 6998 if (UseSSE42Intrinsics || UseAVX >= 2) { 6999 Label L_copy_8_chars, L_copy_8_chars_exit; 7000 Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit; 7001 7002 if (UseAVX >= 2) { 7003 Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit; 7004 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector 7005 movdl(tmp1Reg, tmp5); 7006 vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit); 7007 jmp(L_chars_32_check); 7008 7009 bind(L_copy_32_chars); 7010 vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64)); 7011 vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32)); 7012 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1); 7013 vptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector 7014 jccb(Assembler::notZero, L_copy_32_chars_exit); 7015 vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1); 7016 vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1); 7017 vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg); 7018 7019 bind(L_chars_32_check); 7020 addptr(len, 32); 7021 jcc(Assembler::lessEqual, L_copy_32_chars); 7022 7023 bind(L_copy_32_chars_exit); 7024 subptr(len, 16); 7025 jccb(Assembler::greater, L_copy_16_chars_exit); 7026 7027 } else if (UseSSE42Intrinsics) { 7028 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector 7029 movdl(tmp1Reg, tmp5); 7030 pshufd(tmp1Reg, tmp1Reg, 0); 7031 jmpb(L_chars_16_check); 7032 } 7033 7034 bind(L_copy_16_chars); 7035 if (UseAVX >= 2) { 7036 vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32)); 7037 vptest(tmp2Reg, tmp1Reg); 7038 jcc(Assembler::notZero, L_copy_16_chars_exit); 7039 vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1); 7040 vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1); 7041 } else { 7042 if (UseAVX > 0) { 7043 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32)); 7044 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16)); 7045 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0); 7046 } else { 7047 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32)); 7048 por(tmp2Reg, tmp3Reg); 7049 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16)); 7050 por(tmp2Reg, tmp4Reg); 7051 } 7052 ptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector 7053 jccb(Assembler::notZero, L_copy_16_chars_exit); 7054 packuswb(tmp3Reg, tmp4Reg); 7055 } 7056 movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg); 7057 7058 bind(L_chars_16_check); 7059 addptr(len, 16); 7060 jcc(Assembler::lessEqual, L_copy_16_chars); 7061 7062 bind(L_copy_16_chars_exit); 7063 if (UseAVX >= 2) { 7064 // clean upper bits of YMM registers 7065 vpxor(tmp2Reg, tmp2Reg); 7066 vpxor(tmp3Reg, tmp3Reg); 7067 vpxor(tmp4Reg, tmp4Reg); 7068 movdl(tmp1Reg, tmp5); 7069 pshufd(tmp1Reg, tmp1Reg, 0); 7070 } 7071 subptr(len, 8); 7072 jccb(Assembler::greater, L_copy_8_chars_exit); 7073 7074 bind(L_copy_8_chars); 7075 movdqu(tmp3Reg, Address(src, len, Address::times_2, -16)); 7076 ptest(tmp3Reg, tmp1Reg); 7077 jccb(Assembler::notZero, L_copy_8_chars_exit); 7078 packuswb(tmp3Reg, tmp1Reg); 7079 movq(Address(dst, len, Address::times_1, -8), tmp3Reg); 7080 addptr(len, 8); 7081 
    jccb(Assembler::lessEqual, L_copy_8_chars);

    bind(L_copy_8_chars_exit);
    subptr(len, 8);
    jccb(Assembler::zero, L_done);
  }

  bind(L_copy_1_char);
  load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
  testl(tmp5, short_mask);      // check if Unicode or non-ASCII char
  jccb(Assembler::notZero, L_copy_1_char_exit);
  movb(Address(dst, len, Address::times_1, 0), tmp5);
  addptr(len, 1);
  jccb(Assembler::less, L_copy_1_char);

  bind(L_copy_1_char_exit);
  addptr(result, len); // len is negative count of not processed elements

  bind(L_done);
}

#ifdef _LP64
/**
 * Helper for multiply_to_len().
 */
void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
  addq(dest_lo, src1);
  adcq(dest_hi, 0);
  addq(dest_lo, src2);
  adcq(dest_hi, 0);
}

/**
 * Multiply 64 bit by 64 bit first loop.
 */
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                                           Register y, Register y_idx, Register z,
                                           Register carry, Register product,
                                           Register idx, Register kdx) {
  //
  //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    huge_128 product = y[idx] * x[xstart] + carry;
  //    z[kdx] = (jlong)product;
  //    carry  = (jlong)(product >>> 64);
  //  }
  //  z[xstart] = carry;
  //

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  decrementl(xstart);
  jcc(Assembler::negative, L_one_x);

  movq(x_xstart, Address(x, xstart, Address::times_4, 0));
  rorq(x_xstart, 32); // convert big-endian to little-endian

  bind(L_first_loop);
  decrementl(idx);
  jcc(Assembler::negative, L_first_loop_exit);
  decrementl(idx);
  jcc(Assembler::negative, L_one_y);
  movq(y_idx, Address(y, idx, Address::times_4, 0));
  rorq(y_idx, 32); // convert big-endian to little-endian
  bind(L_multiply);
  movq(product, x_xstart);
  mulq(y_idx); // product(rax) * y_idx -> rdx:rax
  addq(product, carry);
  adcq(rdx, 0);
  subl(kdx, 2);
  movl(Address(z, kdx, Address::times_4, 4), product);
  shrq(product, 32);
  movl(Address(z, kdx, Address::times_4, 0), product);
  movq(carry, rdx);
  jmp(L_first_loop);

  bind(L_one_y);
  movl(y_idx, Address(y, 0));
  jmp(L_multiply);

  bind(L_one_x);
  movl(x_xstart, Address(x, 0));
  jmp(L_first_loop);

  bind(L_first_loop_exit);
}

/**
 * Multiply 64 bit by 64 bit and add 128 bit.
7171 */ 7172 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z, 7173 Register yz_idx, Register idx, 7174 Register carry, Register product, int offset) { 7175 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry; 7176 // z[kdx] = (jlong)product; 7177 7178 movq(yz_idx, Address(y, idx, Address::times_4, offset)); 7179 rorq(yz_idx, 32); // convert big-endian to little-endian 7180 movq(product, x_xstart); 7181 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 7182 movq(yz_idx, Address(z, idx, Address::times_4, offset)); 7183 rorq(yz_idx, 32); // convert big-endian to little-endian 7184 7185 add2_with_carry(rdx, product, carry, yz_idx); 7186 7187 movl(Address(z, idx, Address::times_4, offset+4), product); 7188 shrq(product, 32); 7189 movl(Address(z, idx, Address::times_4, offset), product); 7190 7191 } 7192 7193 /** 7194 * Multiply 128 bit by 128 bit. Unrolled inner loop. 7195 */ 7196 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 7197 Register yz_idx, Register idx, Register jdx, 7198 Register carry, Register product, 7199 Register carry2) { 7200 // jlong carry, x[], y[], z[]; 7201 // int kdx = ystart+1; 7202 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 7203 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry; 7204 // z[kdx+idx+1] = (jlong)product; 7205 // jlong carry2 = (jlong)(product >>> 64); 7206 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2; 7207 // z[kdx+idx] = (jlong)product; 7208 // carry = (jlong)(product >>> 64); 7209 // } 7210 // idx += 2; 7211 // if (idx > 0) { 7212 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry; 7213 // z[kdx+idx] = (jlong)product; 7214 // carry = (jlong)(product >>> 64); 7215 // } 7216 // 7217 7218 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 7219 7220 movl(jdx, idx); 7221 andl(jdx, 0xFFFFFFFC); 7222 shrl(jdx, 2); 7223 7224 bind(L_third_loop); 7225 subl(jdx, 1); 7226 jcc(Assembler::negative, L_third_loop_exit); 7227 subl(idx, 4); 7228 7229 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8); 7230 movq(carry2, rdx); 7231 7232 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0); 7233 movq(carry, rdx); 7234 jmp(L_third_loop); 7235 7236 bind (L_third_loop_exit); 7237 7238 andl (idx, 0x3); 7239 jcc(Assembler::zero, L_post_third_loop_done); 7240 7241 Label L_check_1; 7242 subl(idx, 2); 7243 jcc(Assembler::negative, L_check_1); 7244 7245 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0); 7246 movq(carry, rdx); 7247 7248 bind (L_check_1); 7249 addl (idx, 0x2); 7250 andl (idx, 0x1); 7251 subl(idx, 1); 7252 jcc(Assembler::negative, L_post_third_loop_done); 7253 7254 movl(yz_idx, Address(y, idx, Address::times_4, 0)); 7255 movq(product, x_xstart); 7256 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 7257 movl(yz_idx, Address(z, idx, Address::times_4, 0)); 7258 7259 add2_with_carry(rdx, product, yz_idx, carry); 7260 7261 movl(Address(z, idx, Address::times_4, 0), product); 7262 shrq(product, 32); 7263 7264 shlq(rdx, 32); 7265 orq(product, rdx); 7266 movq(carry, product); 7267 7268 bind(L_post_third_loop_done); 7269 } 7270 7271 /** 7272 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop. 
7273 * 7274 */ 7275 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z, 7276 Register carry, Register carry2, 7277 Register idx, Register jdx, 7278 Register yz_idx1, Register yz_idx2, 7279 Register tmp, Register tmp3, Register tmp4) { 7280 assert(UseBMI2Instructions, "should be used only when BMI2 is available"); 7281 7282 // jlong carry, x[], y[], z[]; 7283 // int kdx = ystart+1; 7284 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 7285 // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry; 7286 // jlong carry2 = (jlong)(tmp3 >>> 64); 7287 // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2; 7288 // carry = (jlong)(tmp4 >>> 64); 7289 // z[kdx+idx+1] = (jlong)tmp3; 7290 // z[kdx+idx] = (jlong)tmp4; 7291 // } 7292 // idx += 2; 7293 // if (idx > 0) { 7294 // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry; 7295 // z[kdx+idx] = (jlong)yz_idx1; 7296 // carry = (jlong)(yz_idx1 >>> 64); 7297 // } 7298 // 7299 7300 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 7301 7302 movl(jdx, idx); 7303 andl(jdx, 0xFFFFFFFC); 7304 shrl(jdx, 2); 7305 7306 bind(L_third_loop); 7307 subl(jdx, 1); 7308 jcc(Assembler::negative, L_third_loop_exit); 7309 subl(idx, 4); 7310 7311 movq(yz_idx1, Address(y, idx, Address::times_4, 8)); 7312 rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 7313 movq(yz_idx2, Address(y, idx, Address::times_4, 0)); 7314 rorxq(yz_idx2, yz_idx2, 32); 7315 7316 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 7317 mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp 7318 7319 movq(yz_idx1, Address(z, idx, Address::times_4, 8)); 7320 rorxq(yz_idx1, yz_idx1, 32); 7321 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 7322 rorxq(yz_idx2, yz_idx2, 32); 7323 7324 if (VM_Version::supports_adx()) { 7325 adcxq(tmp3, carry); 7326 adoxq(tmp3, yz_idx1); 7327 7328 adcxq(tmp4, tmp); 7329 adoxq(tmp4, yz_idx2); 7330 7331 movl(carry, 0); // does not affect flags 7332 adcxq(carry2, carry); 7333 adoxq(carry2, carry); 7334 } else { 7335 add2_with_carry(tmp4, tmp3, carry, yz_idx1); 7336 add2_with_carry(carry2, tmp4, tmp, yz_idx2); 7337 } 7338 movq(carry, carry2); 7339 7340 movl(Address(z, idx, Address::times_4, 12), tmp3); 7341 shrq(tmp3, 32); 7342 movl(Address(z, idx, Address::times_4, 8), tmp3); 7343 7344 movl(Address(z, idx, Address::times_4, 4), tmp4); 7345 shrq(tmp4, 32); 7346 movl(Address(z, idx, Address::times_4, 0), tmp4); 7347 7348 jmp(L_third_loop); 7349 7350 bind (L_third_loop_exit); 7351 7352 andl (idx, 0x3); 7353 jcc(Assembler::zero, L_post_third_loop_done); 7354 7355 Label L_check_1; 7356 subl(idx, 2); 7357 jcc(Assembler::negative, L_check_1); 7358 7359 movq(yz_idx1, Address(y, idx, Address::times_4, 0)); 7360 rorxq(yz_idx1, yz_idx1, 32); 7361 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 7362 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 7363 rorxq(yz_idx2, yz_idx2, 32); 7364 7365 add2_with_carry(tmp4, tmp3, carry, yz_idx2); 7366 7367 movl(Address(z, idx, Address::times_4, 4), tmp3); 7368 shrq(tmp3, 32); 7369 movl(Address(z, idx, Address::times_4, 0), tmp3); 7370 movq(carry, tmp4); 7371 7372 bind (L_check_1); 7373 addl (idx, 0x2); 7374 andl (idx, 0x1); 7375 subl(idx, 1); 7376 jcc(Assembler::negative, L_post_third_loop_done); 7377 movl(tmp4, Address(y, idx, Address::times_4, 0)); 7378 mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3 7379 movl(tmp4, Address(z, idx, Address::times_4, 0)); 7380 7381 add2_with_carry(carry2, tmp3, tmp4, carry); 7382 7383 movl(Address(z, idx, 
Address::times_4, 0), tmp3);
  shrq(tmp3, 32);

  shlq(carry2, 32);
  orq(tmp3, carry2);
  movq(carry, tmp3);

  bind(L_post_third_loop_done);
}

/**
 * Code for BigInteger::multiplyToLen() intrinsic.
 *
 * rdi: x
 * rax: xlen
 * rsi: y
 * rcx: ylen
 * r8:  z
 * r11: zlen
 * r12: tmp1
 * r13: tmp2
 * r14: tmp3
 * r15: tmp4
 * rbx: tmp5
 *
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
  ShortBranchVerifier sbv(this);
  assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);

  push(tmp1);
  push(tmp2);
  push(tmp3);
  push(tmp4);
  push(tmp5);

  push(xlen);
  push(zlen);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product  = xlen;
  const Register x_xstart = zlen;  // reuse register

  // First Loop.
  //
  //  final static long LONG_MASK = 0xffffffffL;
  //  int xstart = xlen - 1;
  //  int ystart = ylen - 1;
  //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //    z[kdx] = (int)product;
  //    carry = product >>> 32;
  //  }
  //  z[xstart] = (int)carry;
  //

  movl(idx, ylen);      // idx = ylen;
  movl(kdx, zlen);      // kdx = xlen+ylen;
  xorq(carry, carry);   // carry = 0;

  Label L_done;

  movl(xstart, xlen);
  decrementl(xstart);
  jcc(Assembler::negative, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  Label L_second_loop;
  testl(kdx, kdx);
  jcc(Assembler::zero, L_second_loop);

  Label L_carry;
  subl(kdx, 1);
  jcc(Assembler::zero, L_carry);

  movl(Address(z, kdx, Address::times_4, 0), carry);
  shrq(carry, 32);
  subl(kdx, 1);

  bind(L_carry);
  movl(Address(z, kdx, Address::times_4, 0), carry);

  // Second and third (nested) loops.
7474 // 7475 // for (int i = xstart-1; i >= 0; i--) { // Second loop 7476 // carry = 0; 7477 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 7478 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 7479 // (z[k] & LONG_MASK) + carry; 7480 // z[k] = (int)product; 7481 // carry = product >>> 32; 7482 // } 7483 // z[i] = (int)carry; 7484 // } 7485 // 7486 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx 7487 7488 const Register jdx = tmp1; 7489 7490 bind(L_second_loop); 7491 xorl(carry, carry); // carry = 0; 7492 movl(jdx, ylen); // j = ystart+1 7493 7494 subl(xstart, 1); // i = xstart-1; 7495 jcc(Assembler::negative, L_done); 7496 7497 push (z); 7498 7499 Label L_last_x; 7500 lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j 7501 subl(xstart, 1); // i = xstart-1; 7502 jcc(Assembler::negative, L_last_x); 7503 7504 if (UseBMI2Instructions) { 7505 movq(rdx, Address(x, xstart, Address::times_4, 0)); 7506 rorxq(rdx, rdx, 32); // convert big-endian to little-endian 7507 } else { 7508 movq(x_xstart, Address(x, xstart, Address::times_4, 0)); 7509 rorq(x_xstart, 32); // convert big-endian to little-endian 7510 } 7511 7512 Label L_third_loop_prologue; 7513 bind(L_third_loop_prologue); 7514 7515 push (x); 7516 push (xstart); 7517 push (ylen); 7518 7519 7520 if (UseBMI2Instructions) { 7521 multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4); 7522 } else { // !UseBMI2Instructions 7523 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x); 7524 } 7525 7526 pop(ylen); 7527 pop(xlen); 7528 pop(x); 7529 pop(z); 7530 7531 movl(tmp3, xlen); 7532 addl(tmp3, 1); 7533 movl(Address(z, tmp3, Address::times_4, 0), carry); 7534 subl(tmp3, 1); 7535 jccb(Assembler::negative, L_done); 7536 7537 shrq(carry, 32); 7538 movl(Address(z, tmp3, Address::times_4, 0), carry); 7539 jmp(L_second_loop); 7540 7541 // Next infrequent code is moved outside loops. 7542 bind(L_last_x); 7543 if (UseBMI2Instructions) { 7544 movl(rdx, Address(x, 0)); 7545 } else { 7546 movl(x_xstart, Address(x, 0)); 7547 } 7548 jmp(L_third_loop_prologue); 7549 7550 bind(L_done); 7551 7552 pop(zlen); 7553 pop(xlen); 7554 7555 pop(tmp5); 7556 pop(tmp4); 7557 pop(tmp3); 7558 pop(tmp2); 7559 pop(tmp1); 7560 } 7561 7562 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 7563 Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){ 7564 assert(UseSSE42Intrinsics, "SSE4.2 must be enabled."); 7565 Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP; 7566 Label VECTOR8_TAIL, VECTOR4_TAIL; 7567 Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL; 7568 Label SAME_TILL_END, DONE; 7569 Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL; 7570 7571 //scale is in rcx in both Win64 and Unix 7572 ShortBranchVerifier sbv(this); 7573 7574 shlq(length); 7575 xorq(result, result); 7576 7577 if ((AVX3Threshold == 0) && (UseAVX > 2) && 7578 VM_Version::supports_avx512vlbw()) { 7579 Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL; 7580 7581 cmpq(length, 64); 7582 jcc(Assembler::less, VECTOR32_TAIL); 7583 7584 movq(tmp1, length); 7585 andq(tmp1, 0x3F); // tail count 7586 andq(length, ~(0x3F)); //vector count 7587 7588 bind(VECTOR64_LOOP); 7589 // AVX512 code to compare 64 byte vectors. 
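// kortestql sets CF only when all 64 mask bits are 1 (every byte pair
// equal), so a clear carry flag (Assembler::aboveEqual) means at least
// one byte differed.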
7590 evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit); 7591 evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit); 7592 kortestql(k7, k7); 7593 jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch 7594 addq(result, 64); 7595 subq(length, 64); 7596 jccb(Assembler::notZero, VECTOR64_LOOP); 7597 7598 //bind(VECTOR64_TAIL); 7599 testq(tmp1, tmp1); 7600 jcc(Assembler::zero, SAME_TILL_END); 7601 7602 //bind(VECTOR64_TAIL); 7603 // AVX512 code to compare up to 63 byte vectors. 7604 mov64(tmp2, 0xFFFFFFFFFFFFFFFF); 7605 shlxq(tmp2, tmp2, tmp1); 7606 notq(tmp2); 7607 kmovql(k3, tmp2); 7608 7609 evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit); 7610 evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit); 7611 7612 ktestql(k7, k3); 7613 jcc(Assembler::below, SAME_TILL_END); // not mismatch 7614 7615 bind(VECTOR64_NOT_EQUAL); 7616 kmovql(tmp1, k7); 7617 notq(tmp1); 7618 tzcntq(tmp1, tmp1); 7619 addq(result, tmp1); 7620 shrq(result); 7621 jmp(DONE); 7622 bind(VECTOR32_TAIL); 7623 } 7624 7625 cmpq(length, 8); 7626 jcc(Assembler::equal, VECTOR8_LOOP); 7627 jcc(Assembler::less, VECTOR4_TAIL); 7628 7629 if (UseAVX >= 2) { 7630 Label VECTOR16_TAIL, VECTOR32_LOOP; 7631 7632 cmpq(length, 16); 7633 jcc(Assembler::equal, VECTOR16_LOOP); 7634 jcc(Assembler::less, VECTOR8_LOOP); 7635 7636 cmpq(length, 32); 7637 jccb(Assembler::less, VECTOR16_TAIL); 7638 7639 subq(length, 32); 7640 bind(VECTOR32_LOOP); 7641 vmovdqu(rymm0, Address(obja, result)); 7642 vmovdqu(rymm1, Address(objb, result)); 7643 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit); 7644 vptest(rymm2, rymm2); 7645 jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found 7646 addq(result, 32); 7647 subq(length, 32); 7648 jcc(Assembler::greaterEqual, VECTOR32_LOOP); 7649 addq(length, 32); 7650 jcc(Assembler::equal, SAME_TILL_END); 7651 //falling through if less than 32 bytes left //close the branch here. 
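    // From here the tail sizes cascade: 16-, 8-, and 4-byte compares,
    // then up to three single-byte probes, all addressed off `result`.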
7652 7653 bind(VECTOR16_TAIL); 7654 cmpq(length, 16); 7655 jccb(Assembler::less, VECTOR8_TAIL); 7656 bind(VECTOR16_LOOP); 7657 movdqu(rymm0, Address(obja, result)); 7658 movdqu(rymm1, Address(objb, result)); 7659 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit); 7660 ptest(rymm2, rymm2); 7661 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 7662 addq(result, 16); 7663 subq(length, 16); 7664 jcc(Assembler::equal, SAME_TILL_END); 7665 //falling through if less than 16 bytes left 7666 } else {//regular intrinsics 7667 7668 cmpq(length, 16); 7669 jccb(Assembler::less, VECTOR8_TAIL); 7670 7671 subq(length, 16); 7672 bind(VECTOR16_LOOP); 7673 movdqu(rymm0, Address(obja, result)); 7674 movdqu(rymm1, Address(objb, result)); 7675 pxor(rymm0, rymm1); 7676 ptest(rymm0, rymm0); 7677 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 7678 addq(result, 16); 7679 subq(length, 16); 7680 jccb(Assembler::greaterEqual, VECTOR16_LOOP); 7681 addq(length, 16); 7682 jcc(Assembler::equal, SAME_TILL_END); 7683 //falling through if less than 16 bytes left 7684 } 7685 7686 bind(VECTOR8_TAIL); 7687 cmpq(length, 8); 7688 jccb(Assembler::less, VECTOR4_TAIL); 7689 bind(VECTOR8_LOOP); 7690 movq(tmp1, Address(obja, result)); 7691 movq(tmp2, Address(objb, result)); 7692 xorq(tmp1, tmp2); 7693 testq(tmp1, tmp1); 7694 jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found 7695 addq(result, 8); 7696 subq(length, 8); 7697 jcc(Assembler::equal, SAME_TILL_END); 7698 //falling through if less than 8 bytes left 7699 7700 bind(VECTOR4_TAIL); 7701 cmpq(length, 4); 7702 jccb(Assembler::less, BYTES_TAIL); 7703 bind(VECTOR4_LOOP); 7704 movl(tmp1, Address(obja, result)); 7705 xorl(tmp1, Address(objb, result)); 7706 testl(tmp1, tmp1); 7707 jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found 7708 addq(result, 4); 7709 subq(length, 4); 7710 jcc(Assembler::equal, SAME_TILL_END); 7711 //falling through if less than 4 bytes left 7712 7713 bind(BYTES_TAIL); 7714 bind(BYTES_LOOP); 7715 load_unsigned_byte(tmp1, Address(obja, result)); 7716 load_unsigned_byte(tmp2, Address(objb, result)); 7717 xorl(tmp1, tmp2); 7718 testl(tmp1, tmp1); 7719 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 7720 decq(length); 7721 jcc(Assembler::zero, SAME_TILL_END); 7722 incq(result); 7723 load_unsigned_byte(tmp1, Address(obja, result)); 7724 load_unsigned_byte(tmp2, Address(objb, result)); 7725 xorl(tmp1, tmp2); 7726 testl(tmp1, tmp1); 7727 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 7728 decq(length); 7729 jcc(Assembler::zero, SAME_TILL_END); 7730 incq(result); 7731 load_unsigned_byte(tmp1, Address(obja, result)); 7732 load_unsigned_byte(tmp2, Address(objb, result)); 7733 xorl(tmp1, tmp2); 7734 testl(tmp1, tmp1); 7735 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 7736 jmp(SAME_TILL_END); 7737 7738 if (UseAVX >= 2) { 7739 bind(VECTOR32_NOT_EQUAL); 7740 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit); 7741 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit); 7742 vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit); 7743 vpmovmskb(tmp1, rymm0); 7744 bsfq(tmp1, tmp1); 7745 addq(result, tmp1); 7746 shrq(result); 7747 jmp(DONE); 7748 } 7749 7750 bind(VECTOR16_NOT_EQUAL); 7751 if (UseAVX >= 2) { 7752 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit); 7753 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit); 7754 pxor(rymm0, rymm2); 7755 } else { 7756 pcmpeqb(rymm2, rymm2); 7757 pxor(rymm0, rymm1); 7758 pcmpeqb(rymm0, rymm1); 7759 pxor(rymm0, rymm2); 7760 } 7761 pmovmskb(tmp1, rymm0); 7762 
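  // tmp1 now carries one bit per differing byte of the 16-byte chunk;
  // bsf below yields the offset of the first mismatch.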
bsfq(tmp1, tmp1); 7763 addq(result, tmp1); 7764 shrq(result); 7765 jmpb(DONE); 7766 7767 bind(VECTOR8_NOT_EQUAL); 7768 bind(VECTOR4_NOT_EQUAL); 7769 bsfq(tmp1, tmp1); 7770 shrq(tmp1, 3); 7771 addq(result, tmp1); 7772 bind(BYTES_NOT_EQUAL); 7773 shrq(result); 7774 jmpb(DONE); 7775 7776 bind(SAME_TILL_END); 7777 mov64(result, -1); 7778 7779 bind(DONE); 7780 } 7781 7782 //Helper functions for square_to_len() 7783 7784 /** 7785 * Store the squares of x[], right shifted one bit (divided by 2) into z[] 7786 * Preserves x and z and modifies rest of the registers. 7787 */ 7788 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7789 // Perform square and right shift by 1 7790 // Handle odd xlen case first, then for even xlen do the following 7791 // jlong carry = 0; 7792 // for (int j=0, i=0; j < xlen; j+=2, i+=4) { 7793 // huge_128 product = x[j:j+1] * x[j:j+1]; 7794 // z[i:i+1] = (carry << 63) | (jlong)(product >>> 65); 7795 // z[i+2:i+3] = (jlong)(product >>> 1); 7796 // carry = (jlong)product; 7797 // } 7798 7799 xorq(tmp5, tmp5); // carry 7800 xorq(rdxReg, rdxReg); 7801 xorl(tmp1, tmp1); // index for x 7802 xorl(tmp4, tmp4); // index for z 7803 7804 Label L_first_loop, L_first_loop_exit; 7805 7806 testl(xlen, 1); 7807 jccb(Assembler::zero, L_first_loop); //jump if xlen is even 7808 7809 // Square and right shift by 1 the odd element using 32 bit multiply 7810 movl(raxReg, Address(x, tmp1, Address::times_4, 0)); 7811 imulq(raxReg, raxReg); 7812 shrq(raxReg, 1); 7813 adcq(tmp5, 0); 7814 movq(Address(z, tmp4, Address::times_4, 0), raxReg); 7815 incrementl(tmp1); 7816 addl(tmp4, 2); 7817 7818 // Square and right shift by 1 the rest using 64 bit multiply 7819 bind(L_first_loop); 7820 cmpptr(tmp1, xlen); 7821 jccb(Assembler::equal, L_first_loop_exit); 7822 7823 // Square 7824 movq(raxReg, Address(x, tmp1, Address::times_4, 0)); 7825 rorq(raxReg, 32); // convert big-endian to little-endian 7826 mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax 7827 7828 // Right shift by 1 and save carry 7829 shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1 7830 rcrq(rdxReg, 1); 7831 rcrq(raxReg, 1); 7832 adcq(tmp5, 0); 7833 7834 // Store result in z 7835 movq(Address(z, tmp4, Address::times_4, 0), rdxReg); 7836 movq(Address(z, tmp4, Address::times_4, 8), raxReg); 7837 7838 // Update indices for x and z 7839 addl(tmp1, 2); 7840 addl(tmp4, 4); 7841 jmp(L_first_loop); 7842 7843 bind(L_first_loop_exit); 7844 } 7845 7846 7847 /** 7848 * Perform the following multiply add operation using BMI2 instructions 7849 * carry:sum = sum + op1*op2 + carry 7850 * op2 should be in rdx 7851 * op2 is preserved, all other registers are modified 7852 */ 7853 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) { 7854 // assert op2 is rdx 7855 mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1 7856 addq(sum, carry); 7857 adcq(tmp2, 0); 7858 addq(sum, op1); 7859 adcq(tmp2, 0); 7860 movq(carry, tmp2); 7861 } 7862 7863 /** 7864 * Perform the following multiply add operation: 7865 * carry:sum = sum + op1*op2 + carry 7866 * Preserves op1, op2 and modifies rest of registers 7867 */ 7868 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) { 7869 // rdx:rax = op1 * op2 7870 movq(raxReg, op2); 7871 mulq(op1); 7872 7873 // rdx:rax = sum + carry + rdx:rax 7874 addq(sum, carry); 7875 
adcq(rdxReg, 0); 7876 addq(sum, raxReg); 7877 adcq(rdxReg, 0); 7878 7879 // carry:sum = rdx:sum 7880 movq(carry, rdxReg); 7881 } 7882 7883 /** 7884 * Add 64 bit long carry into z[] with carry propagation. 7885 * Preserves z and carry register values and modifies rest of registers. 7886 * 7887 */ 7888 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) { 7889 Label L_fourth_loop, L_fourth_loop_exit; 7890 7891 movl(tmp1, 1); 7892 subl(zlen, 2); 7893 addq(Address(z, zlen, Address::times_4, 0), carry); 7894 7895 bind(L_fourth_loop); 7896 jccb(Assembler::carryClear, L_fourth_loop_exit); 7897 subl(zlen, 2); 7898 jccb(Assembler::negative, L_fourth_loop_exit); 7899 addq(Address(z, zlen, Address::times_4, 0), tmp1); 7900 jmp(L_fourth_loop); 7901 bind(L_fourth_loop_exit); 7902 } 7903 7904 /** 7905 * Shift z[] left by 1 bit. 7906 * Preserves x, len, z and zlen registers and modifies rest of the registers. 7907 * 7908 */ 7909 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) { 7910 7911 Label L_fifth_loop, L_fifth_loop_exit; 7912 7913 // Fifth loop 7914 // Perform primitiveLeftShift(z, zlen, 1) 7915 7916 const Register prev_carry = tmp1; 7917 const Register new_carry = tmp4; 7918 const Register value = tmp2; 7919 const Register zidx = tmp3; 7920 7921 // int zidx, carry; 7922 // long value; 7923 // carry = 0; 7924 // for (zidx = zlen-2; zidx >=0; zidx -= 2) { 7925 // (carry:value) = (z[i] << 1) | carry ; 7926 // z[i] = value; 7927 // } 7928 7929 movl(zidx, zlen); 7930 xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register 7931 7932 bind(L_fifth_loop); 7933 decl(zidx); // Use decl to preserve carry flag 7934 decl(zidx); 7935 jccb(Assembler::negative, L_fifth_loop_exit); 7936 7937 if (UseBMI2Instructions) { 7938 movq(value, Address(z, zidx, Address::times_4, 0)); 7939 rclq(value, 1); 7940 rorxq(value, value, 32); 7941 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 7942 } 7943 else { 7944 // clear new_carry 7945 xorl(new_carry, new_carry); 7946 7947 // Shift z[i] by 1, or in previous carry and save new carry 7948 movq(value, Address(z, zidx, Address::times_4, 0)); 7949 shlq(value, 1); 7950 adcl(new_carry, 0); 7951 7952 orq(value, prev_carry); 7953 rorq(value, 0x20); 7954 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 7955 7956 // Set previous carry = new carry 7957 movl(prev_carry, new_carry); 7958 } 7959 jmp(L_fifth_loop); 7960 7961 bind(L_fifth_loop_exit); 7962 } 7963 7964 7965 /** 7966 * Code for BigInteger::squareToLen() intrinsic 7967 * 7968 * rdi: x 7969 * rsi: len 7970 * r8: z 7971 * rcx: zlen 7972 * r12: tmp1 7973 * r13: tmp2 7974 * r14: tmp3 7975 * r15: tmp4 7976 * rbx: tmp5 7977 * 7978 */ 7979 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7980 7981 Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply; 7982 push(tmp1); 7983 push(tmp2); 7984 push(tmp3); 7985 push(tmp4); 7986 push(tmp5); 7987 7988 // First loop 7989 // Store the squares, right shifted one bit (i.e., divided by 2). 7990 square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg); 7991 7992 // Add in off-diagonal sums. 7993 // 7994 // Second, third (nested) and fourth loops. 
7995 // zlen +=2; 7996 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) { 7997 // carry = 0; 7998 // long op2 = x[xidx:xidx+1]; 7999 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) { 8000 // k -= 2; 8001 // long op1 = x[j:j+1]; 8002 // long sum = z[k:k+1]; 8003 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs); 8004 // z[k:k+1] = sum; 8005 // } 8006 // add_one_64(z, k, carry, tmp_regs); 8007 // } 8008 8009 const Register carry = tmp5; 8010 const Register sum = tmp3; 8011 const Register op1 = tmp4; 8012 Register op2 = tmp2; 8013 8014 push(zlen); 8015 push(len); 8016 addl(zlen,2); 8017 bind(L_second_loop); 8018 xorq(carry, carry); 8019 subl(zlen, 4); 8020 subl(len, 2); 8021 push(zlen); 8022 push(len); 8023 cmpl(len, 0); 8024 jccb(Assembler::lessEqual, L_second_loop_exit); 8025 8026 // Multiply an array by one 64 bit long. 8027 if (UseBMI2Instructions) { 8028 op2 = rdxReg; 8029 movq(op2, Address(x, len, Address::times_4, 0)); 8030 rorxq(op2, op2, 32); 8031 } 8032 else { 8033 movq(op2, Address(x, len, Address::times_4, 0)); 8034 rorq(op2, 32); 8035 } 8036 8037 bind(L_third_loop); 8038 decrementl(len); 8039 jccb(Assembler::negative, L_third_loop_exit); 8040 decrementl(len); 8041 jccb(Assembler::negative, L_last_x); 8042 8043 movq(op1, Address(x, len, Address::times_4, 0)); 8044 rorq(op1, 32); 8045 8046 bind(L_multiply); 8047 subl(zlen, 2); 8048 movq(sum, Address(z, zlen, Address::times_4, 0)); 8049 8050 // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry. 8051 if (UseBMI2Instructions) { 8052 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2); 8053 } 8054 else { 8055 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8056 } 8057 8058 movq(Address(z, zlen, Address::times_4, 0), sum); 8059 8060 jmp(L_third_loop); 8061 bind(L_third_loop_exit); 8062 8063 // Fourth loop 8064 // Add 64 bit long carry into z with carry propagation. 8065 // Uses offsetted zlen. 8066 add_one_64(z, zlen, carry, tmp1); 8067 8068 pop(len); 8069 pop(zlen); 8070 jmp(L_second_loop); 8071 8072 // Next infrequent code is moved outside loops. 8073 bind(L_last_x); 8074 movl(op1, Address(x, 0)); 8075 jmp(L_multiply); 8076 8077 bind(L_second_loop_exit); 8078 pop(len); 8079 pop(zlen); 8080 pop(len); 8081 pop(zlen); 8082 8083 // Fifth loop 8084 // Shift z left 1 bit. 8085 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4); 8086 8087 // z[zlen-1] |= x[len-1] & 1; 8088 movl(tmp3, Address(x, len, Address::times_4, -4)); 8089 andl(tmp3, 1); 8090 orl(Address(z, zlen, Address::times_4, -4), tmp3); 8091 8092 pop(tmp5); 8093 pop(tmp4); 8094 pop(tmp3); 8095 pop(tmp2); 8096 pop(tmp1); 8097 } 8098 8099 /** 8100 * Helper function for mul_add() 8101 * Multiply the in[] by int k and add to out[] starting at offset offs using 8102 * 128 bit by 32 bit multiply and return the carry in tmp5. 8103 * Only quad int aligned length of in[] is operated on in this function. 8104 * k is in rdxReg for BMI2Instructions, for others it is in tmp2. 8105 * This function preserves out, in and k registers. 8106 * len and offset point to the appropriate index in "in" & "out" correspondingly 8107 * tmp5 has the carry. 8108 * other registers are temporary and are modified. 
8109 * 8110 */ 8111 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in, 8112 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3, 8113 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 8114 8115 Label L_first_loop, L_first_loop_exit; 8116 8117 movl(tmp1, len); 8118 shrl(tmp1, 2); 8119 8120 bind(L_first_loop); 8121 subl(tmp1, 1); 8122 jccb(Assembler::negative, L_first_loop_exit); 8123 8124 subl(len, 4); 8125 subl(offset, 4); 8126 8127 Register op2 = tmp2; 8128 const Register sum = tmp3; 8129 const Register op1 = tmp4; 8130 const Register carry = tmp5; 8131 8132 if (UseBMI2Instructions) { 8133 op2 = rdxReg; 8134 } 8135 8136 movq(op1, Address(in, len, Address::times_4, 8)); 8137 rorq(op1, 32); 8138 movq(sum, Address(out, offset, Address::times_4, 8)); 8139 rorq(sum, 32); 8140 if (UseBMI2Instructions) { 8141 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 8142 } 8143 else { 8144 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8145 } 8146 // Store back in big endian from little endian 8147 rorq(sum, 0x20); 8148 movq(Address(out, offset, Address::times_4, 8), sum); 8149 8150 movq(op1, Address(in, len, Address::times_4, 0)); 8151 rorq(op1, 32); 8152 movq(sum, Address(out, offset, Address::times_4, 0)); 8153 rorq(sum, 32); 8154 if (UseBMI2Instructions) { 8155 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 8156 } 8157 else { 8158 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8159 } 8160 // Store back in big endian from little endian 8161 rorq(sum, 0x20); 8162 movq(Address(out, offset, Address::times_4, 0), sum); 8163 8164 jmp(L_first_loop); 8165 bind(L_first_loop_exit); 8166 } 8167 8168 /** 8169 * Code for BigInteger::mulAdd() intrinsic 8170 * 8171 * rdi: out 8172 * rsi: in 8173 * r11: offs (out.length - offset) 8174 * rcx: len 8175 * r8: k 8176 * r12: tmp1 8177 * r13: tmp2 8178 * r14: tmp3 8179 * r15: tmp4 8180 * rbx: tmp5 8181 * Multiply the in[] by word k and add to out[], return the carry in rax 8182 */ 8183 void MacroAssembler::mul_add(Register out, Register in, Register offs, 8184 Register len, Register k, Register tmp1, Register tmp2, Register tmp3, 8185 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 8186 8187 Label L_carry, L_last_in, L_done; 8188 8189 // carry = 0; 8190 // for (int j=len-1; j >= 0; j--) { 8191 // long product = (in[j] & LONG_MASK) * kLong + 8192 // (out[offs] & LONG_MASK) + carry; 8193 // out[offs--] = (int)product; 8194 // carry = product >>> 32; 8195 // } 8196 // 8197 push(tmp1); 8198 push(tmp2); 8199 push(tmp3); 8200 push(tmp4); 8201 push(tmp5); 8202 8203 Register op2 = tmp2; 8204 const Register sum = tmp3; 8205 const Register op1 = tmp4; 8206 const Register carry = tmp5; 8207 8208 if (UseBMI2Instructions) { 8209 op2 = rdxReg; 8210 movl(op2, k); 8211 } 8212 else { 8213 movl(op2, k); 8214 } 8215 8216 xorq(carry, carry); 8217 8218 //First loop 8219 8220 //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply 8221 //The carry is in tmp5 8222 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg); 8223 8224 //Multiply the trailing in[] entry using 64 bit by 32 bit, if any 8225 decrementl(len); 8226 jccb(Assembler::negative, L_carry); 8227 decrementl(len); 8228 jccb(Assembler::negative, L_last_in); 8229 8230 movq(op1, Address(in, len, Address::times_4, 0)); 8231 rorq(op1, 32); 8232 8233 subl(offs, 2); 8234 movq(sum, Address(out, offs, Address::times_4, 0)); 8235 rorq(sum, 32); 8236 8237 if (UseBMI2Instructions) { 8238 
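    // k already sits in rdx (op2 aliases rdxReg on this path), which is
    // the implicit multiplicand that mulxq uses inside the helper.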
multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 8239 } 8240 else { 8241 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8242 } 8243 8244 // Store back in big endian from little endian 8245 rorq(sum, 0x20); 8246 movq(Address(out, offs, Address::times_4, 0), sum); 8247 8248 testl(len, len); 8249 jccb(Assembler::zero, L_carry); 8250 8251 //Multiply the last in[] entry, if any 8252 bind(L_last_in); 8253 movl(op1, Address(in, 0)); 8254 movl(sum, Address(out, offs, Address::times_4, -4)); 8255 8256 movl(raxReg, k); 8257 mull(op1); //tmp4 * eax -> edx:eax 8258 addl(sum, carry); 8259 adcl(rdxReg, 0); 8260 addl(sum, raxReg); 8261 adcl(rdxReg, 0); 8262 movl(carry, rdxReg); 8263 8264 movl(Address(out, offs, Address::times_4, -4), sum); 8265 8266 bind(L_carry); 8267 //return tmp5/carry as carry in rax 8268 movl(rax, carry); 8269 8270 bind(L_done); 8271 pop(tmp5); 8272 pop(tmp4); 8273 pop(tmp3); 8274 pop(tmp2); 8275 pop(tmp1); 8276 } 8277 #endif 8278 8279 /** 8280 * Emits code to update CRC-32 with a byte value according to constants in table 8281 * 8282 * @param [in,out]crc Register containing the crc. 8283 * @param [in]val Register containing the byte to fold into the CRC. 8284 * @param [in]table Register containing the table of crc constants. 8285 * 8286 * uint32_t crc; 8287 * val = crc_table[(val ^ crc) & 0xFF]; 8288 * crc = val ^ (crc >> 8); 8289 * 8290 */ 8291 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 8292 xorl(val, crc); 8293 andl(val, 0xFF); 8294 shrl(crc, 8); // unsigned shift 8295 xorl(crc, Address(table, val, Address::times_4, 0)); 8296 } 8297 8298 /** 8299 * Fold 128-bit data chunk 8300 */ 8301 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { 8302 if (UseAVX > 0) { 8303 vpclmulhdq(xtmp, xK, xcrc); // [123:64] 8304 vpclmulldq(xcrc, xK, xcrc); // [63:0] 8305 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */); 8306 pxor(xcrc, xtmp); 8307 } else { 8308 movdqa(xtmp, xcrc); 8309 pclmulhdq(xtmp, xK); // [123:64] 8310 pclmulldq(xcrc, xK); // [63:0] 8311 pxor(xcrc, xtmp); 8312 movdqu(xtmp, Address(buf, offset)); 8313 pxor(xcrc, xtmp); 8314 } 8315 } 8316 8317 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) { 8318 if (UseAVX > 0) { 8319 vpclmulhdq(xtmp, xK, xcrc); 8320 vpclmulldq(xcrc, xK, xcrc); 8321 pxor(xcrc, xbuf); 8322 pxor(xcrc, xtmp); 8323 } else { 8324 movdqa(xtmp, xcrc); 8325 pclmulhdq(xtmp, xK); 8326 pclmulldq(xcrc, xK); 8327 pxor(xcrc, xbuf); 8328 pxor(xcrc, xtmp); 8329 } 8330 } 8331 8332 /** 8333 * 8-bit folds to compute 32-bit CRC 8334 * 8335 * uint64_t xcrc; 8336 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8); 8337 */ 8338 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) { 8339 movdl(tmp, xcrc); 8340 andl(tmp, 0xFF); 8341 movdl(xtmp, Address(table, tmp, Address::times_4, 0)); 8342 psrldq(xcrc, 1); // unsigned shift one byte 8343 pxor(xcrc, xtmp); 8344 } 8345 8346 /** 8347 * uint32_t crc; 8348 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 8349 */ 8350 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 8351 movl(tmp, crc); 8352 andl(tmp, 0xFF); 8353 shrl(crc, 8); 8354 xorl(crc, Address(table, tmp, Address::times_4, 0)); 8355 } 8356 8357 /** 8358 * @param crc register containing existing CRC (32-bit) 8359 * @param buf register pointing to input byte buffer (byte*) 8360 * @param len register containing number of bytes 8361 * 
@param table register that will contain address of CRC table 8362 * @param tmp scratch register 8363 */ 8364 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) { 8365 assert_different_registers(crc, buf, len, table, tmp, rax); 8366 8367 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 8368 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 8369 8370 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 8371 // context for the registers used, where all instructions below are using 128-bit mode 8372 // On EVEX without VL and BW, these instructions will all be AVX. 8373 lea(table, ExternalAddress(StubRoutines::crc_table_addr())); 8374 notl(crc); // ~crc 8375 cmpl(len, 16); 8376 jcc(Assembler::less, L_tail); 8377 8378 // Align buffer to 16 bytes 8379 movl(tmp, buf); 8380 andl(tmp, 0xF); 8381 jccb(Assembler::zero, L_aligned); 8382 subl(tmp, 16); 8383 addl(len, tmp); 8384 8385 align(4); 8386 BIND(L_align_loop); 8387 movsbl(rax, Address(buf, 0)); // load byte with sign extension 8388 update_byte_crc32(crc, rax, table); 8389 increment(buf); 8390 incrementl(tmp); 8391 jccb(Assembler::less, L_align_loop); 8392 8393 BIND(L_aligned); 8394 movl(tmp, len); // save 8395 shrl(len, 4); 8396 jcc(Assembler::zero, L_tail_restore); 8397 8398 // Fold crc into first bytes of vector 8399 movdqa(xmm1, Address(buf, 0)); 8400 movdl(rax, xmm1); 8401 xorl(crc, rax); 8402 if (VM_Version::supports_sse4_1()) { 8403 pinsrd(xmm1, crc, 0); 8404 } else { 8405 pinsrw(xmm1, crc, 0); 8406 shrl(crc, 16); 8407 pinsrw(xmm1, crc, 1); 8408 } 8409 addptr(buf, 16); 8410 subl(len, 4); // len > 0 8411 jcc(Assembler::less, L_fold_tail); 8412 8413 movdqa(xmm2, Address(buf, 0)); 8414 movdqa(xmm3, Address(buf, 16)); 8415 movdqa(xmm4, Address(buf, 32)); 8416 addptr(buf, 48); 8417 subl(len, 3); 8418 jcc(Assembler::lessEqual, L_fold_512b); 8419 8420 // Fold total 512 bits of polynomial on each iteration, 8421 // 128 bits per each of 4 parallel streams. 8422 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1); 8423 8424 align32(); 8425 BIND(L_fold_512b_loop); 8426 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 8427 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16); 8428 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32); 8429 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48); 8430 addptr(buf, 64); 8431 subl(len, 4); 8432 jcc(Assembler::greater, L_fold_512b_loop); 8433 8434 // Fold 512 bits to 128 bits. 8435 BIND(L_fold_512b); 8436 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 8437 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2); 8438 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3); 8439 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4); 8440 8441 // Fold the rest of 128 bits data chunks 8442 BIND(L_fold_tail); 8443 addl(len, 3); 8444 jccb(Assembler::lessEqual, L_fold_128b); 8445 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 8446 8447 BIND(L_fold_tail_loop); 8448 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 8449 addptr(buf, 16); 8450 decrementl(len); 8451 jccb(Assembler::greater, L_fold_tail_loop); 8452 8453 // Fold 128 bits in xmm1 down into 32 bits in crc register. 
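  // Two carry-less multiplies against the crc_by128_masks constants
  // reduce the 128-bit remainder; eight 8-bit table folds (four via
  // xmm0, then four via the general register) finish the job, each step
  // computing crc = table[crc & 0xFF] ^ (crc >> 8).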
8454 BIND(L_fold_128b); 8455 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1); 8456 if (UseAVX > 0) { 8457 vpclmulqdq(xmm2, xmm0, xmm1, 0x1); 8458 vpand(xmm3, xmm0, xmm2, 0 /* vector_len */); 8459 vpclmulqdq(xmm0, xmm0, xmm3, 0x1); 8460 } else { 8461 movdqa(xmm2, xmm0); 8462 pclmulqdq(xmm2, xmm1, 0x1); 8463 movdqa(xmm3, xmm0); 8464 pand(xmm3, xmm2); 8465 pclmulqdq(xmm0, xmm3, 0x1); 8466 } 8467 psrldq(xmm1, 8); 8468 psrldq(xmm2, 4); 8469 pxor(xmm0, xmm1); 8470 pxor(xmm0, xmm2); 8471 8472 // 8 8-bit folds to compute 32-bit CRC. 8473 for (int j = 0; j < 4; j++) { 8474 fold_8bit_crc32(xmm0, table, xmm1, rax); 8475 } 8476 movdl(crc, xmm0); // mov 32 bits to general register 8477 for (int j = 0; j < 4; j++) { 8478 fold_8bit_crc32(crc, table, rax); 8479 } 8480 8481 BIND(L_tail_restore); 8482 movl(len, tmp); // restore 8483 BIND(L_tail); 8484 andl(len, 0xf); 8485 jccb(Assembler::zero, L_exit); 8486 8487 // Fold the rest of bytes 8488 align(4); 8489 BIND(L_tail_loop); 8490 movsbl(rax, Address(buf, 0)); // load byte with sign extension 8491 update_byte_crc32(crc, rax, table); 8492 increment(buf); 8493 decrementl(len); 8494 jccb(Assembler::greater, L_tail_loop); 8495 8496 BIND(L_exit); 8497 notl(crc); // ~c 8498 } 8499 8500 #ifdef _LP64 8501 // Helper function for AVX 512 CRC32 8502 // Fold 512-bit data chunks 8503 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, 8504 Register pos, int offset) { 8505 evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit); 8506 evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64] 8507 evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0] 8508 evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */); 8509 evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */); 8510 } 8511 8512 // Helper function for AVX 512 CRC32 8513 // Compute CRC32 for < 256B buffers 8514 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos, 8515 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop, 8516 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) { 8517 8518 Label L_less_than_32, L_exact_16_left, L_less_than_16_left; 8519 Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left; 8520 Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2; 8521 8522 // check if there is enough buffer to be able to fold 16B at a time 8523 cmpl(len, 32); 8524 jcc(Assembler::less, L_less_than_32); 8525 8526 // if there is, load the constants 8527 movdqu(xmm10, Address(table, 1 * 16)); //rk1 and rk2 in xmm10 8528 movdl(xmm0, crc); // get the initial crc value 8529 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext 8530 pxor(xmm7, xmm0); 8531 8532 // update the buffer pointer 8533 addl(pos, 16); 8534 //update the counter.subtract 32 instead of 16 to save one instruction from the loop 8535 subl(len, 32); 8536 jmp(L_16B_reduction_loop); 8537 8538 bind(L_less_than_32); 8539 //mov initial crc to the return value. this is necessary for zero - length buffers. 
  movl(rax, crc);
  testl(len, len);
  jcc(Assembler::equal, L_cleanup);

  movdl(xmm0, crc); // get the initial crc value

  cmpl(len, 16);
  jcc(Assembler::equal, L_exact_16_left);
  jcc(Assembler::less, L_less_than_16_left);

  movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); // load the plaintext
  pxor(xmm7, xmm0); // xor the initial crc value
  addl(pos, 16);
  subl(len, 16);
  movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10
  jmp(L_get_last_two_xmms);

  bind(L_less_than_16_left);
  // use stack space to load data less than 16 bytes; zero out the 16B in memory first.
  pxor(xmm1, xmm1);
  movptr(tmp1, rsp);
  movdqu(Address(tmp1, 0 * 16), xmm1);

  cmpl(len, 4);
  jcc(Assembler::less, L_only_less_than_4);

  // back up the counter value
  movl(tmp2, len);
  cmpl(len, 8);
  jcc(Assembler::less, L_less_than_8_left);

  // load 8 Bytes
  movq(rax, Address(buf, pos, Address::times_1, 0 * 16));
  movq(Address(tmp1, 0 * 16), rax);
  addptr(tmp1, 8);
  subl(len, 8);
  addl(pos, 8);

  bind(L_less_than_8_left);
  cmpl(len, 4);
  jcc(Assembler::less, L_less_than_4_left);

  // load 4 Bytes
  movl(rax, Address(buf, pos, Address::times_1, 0));
  movl(Address(tmp1, 0 * 16), rax);
  addptr(tmp1, 4);
  subl(len, 4);
  addl(pos, 4);

  bind(L_less_than_4_left);
  cmpl(len, 2);
  jcc(Assembler::less, L_less_than_2_left);

  // load 2 Bytes
  movw(rax, Address(buf, pos, Address::times_1, 0));
  movl(Address(tmp1, 0 * 16), rax);
  addptr(tmp1, 2);
  subl(len, 2);
  addl(pos, 2);

  bind(L_less_than_2_left);
  cmpl(len, 1);
  jcc(Assembler::less, L_zero_left);

  // load 1 Byte
  movb(rax, Address(buf, pos, Address::times_1, 0));
  movb(Address(tmp1, 0 * 16), rax);

  bind(L_zero_left);
  movdqu(xmm7, Address(rsp, 0));
  pxor(xmm7, xmm0); // xor the initial crc value

  lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
  movdqu(xmm0, Address(rax, tmp2));
  pshufb(xmm7, xmm0);
  jmp(L_128_done);

  bind(L_exact_16_left);
  movdqu(xmm7, Address(buf, pos, Address::times_1, 0));
  pxor(xmm7, xmm0); // xor the initial crc value
  jmp(L_128_done);

  bind(L_only_less_than_4);
  cmpl(len, 3);
  jcc(Assembler::less, L_only_less_than_3);

  // load 3 Bytes
  movb(rax, Address(buf, pos, Address::times_1, 0));
  movb(Address(tmp1, 0), rax);

  movb(rax, Address(buf, pos, Address::times_1, 1));
  movb(Address(tmp1, 1), rax);

  movb(rax, Address(buf, pos, Address::times_1, 2));
  movb(Address(tmp1, 2), rax);

  movdqu(xmm7, Address(rsp, 0));
  pxor(xmm7, xmm0); // xor the initial crc value

  pslldq(xmm7, 0x5);
  jmp(L_barrett);

  bind(L_only_less_than_3);
  cmpl(len, 2);
  jcc(Assembler::less, L_only_less_than_2);

  // load 2 Bytes
  movb(rax, Address(buf, pos, Address::times_1, 0));
  movb(Address(tmp1, 0), rax);

  movb(rax, Address(buf, pos, Address::times_1, 1));
  movb(Address(tmp1, 1), rax);

  movdqu(xmm7, Address(rsp, 0));
  pxor(xmm7, xmm0); // xor the initial crc value

  pslldq(xmm7, 0x6);
  jmp(L_barrett);

  bind(L_only_less_than_2);
  // load 1 Byte
  movb(rax, Address(buf, pos, Address::times_1, 0));
  movb(Address(tmp1, 0), rax);

  movdqu(xmm7, Address(rsp, 0));
  pxor(xmm7, xmm0); // xor the initial crc value

  pslldq(xmm7, 0x7);
}

/**
 * Compute CRC32 using AVX512 instructions
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table address of crc or crc32c table
 * @param tmp1  scratch register
 * @param tmp2  scratch register
 * @return rax  result register
 *
 * This routine is identical for crc32c with the exception of the precomputed constant
 * table which will be passed as the table argument. The calculation steps are
 * the same for both variants.
 */
void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) {
  assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12);

  Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
  Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
  Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop;
  Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop;
  Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup;

  const Register pos = r12;
  push(r12);
  subptr(rsp, 16 * 2 + 8);

  // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
  // context for the registers used, where all instructions below are using 128-bit mode
  // On EVEX without VL and BW, these instructions will all be AVX.
  movl(pos, 0);

  // check if smaller than 256B
  cmpl(len, 256);
  jcc(Assembler::less, L_less_than_256);

  // load the initial crc value
  movdl(xmm10, crc);

  // receive the initial 64B data, xor the initial crc value
  evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
  evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
  evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit);
  evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); // zmm10 has rk3 and rk4

  subl(len, 256);
  cmpl(len, 256);
  jcc(Assembler::less, L_fold_128_B_loop);

  evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
  evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
  evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); // zmm16 has rk-1 and rk-2
  subl(len, 256);

  bind(L_fold_256_B_loop);
  addl(pos, 256);
  fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64);
  fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64);
  fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64);
  fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64);

  subl(len, 256);
  jcc(Assembler::greaterEqual, L_fold_256_B_loop);

  // Fold 256 into 128
  addl(pos, 256);
  evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit);
  evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit);
  vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC

  evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit);
  evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit);
  vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC

  evmovdquq(xmm0, xmm7, Assembler::AVX_512bit);
  evmovdquq(xmm4, xmm8, Assembler::AVX_512bit);

  addl(len, 128);
  jmp(L_fold_128_B_register);

  // at this section of the code, there is 128 * x + y (0 <= y < 128) bytes of buffer. The fold_128_B_loop
  // loop will fold 128B at a time until we have 128 + y Bytes of buffer

  // fold 128B at a time. This section of the code folds 8 xmm registers in parallel
  bind(L_fold_128_B_loop);
  addl(pos, 128);
  fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64);
  fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64);

  subl(len, 128);
  jcc(Assembler::greaterEqual, L_fold_128_B_loop);

  addl(pos, 128);

  // at this point, the buffer pointer is pointing at the last y Bytes of the buffer, where 0 <= y < 128
  // the 128B of folded data is in 8 of the xmm registers: xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
  bind(L_fold_128_B_register);
  evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16
  evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0
  evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit);
  evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit);
  // save last that has no multiplicand
  vextracti64x2(xmm7, xmm4, 3);

  evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit);
  evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit);
  // Needed later in reduction loop
  movdqu(xmm10, Address(table, 1 * 16));
  vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC
  vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC

  // Swap 1,0,3,2 - 01 00 11 10
  evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit);
  evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit);
  vextracti128(xmm5, xmm8, 1);
  evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit);

  // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop
  // instead of a cmp instruction, we use the negative flag with the jl instruction
  addl(len, 128 - 16);
  jcc(Assembler::less, L_final_reduction_for_128);

  bind(L_16B_reduction_loop);
  vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
  vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
  vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
  movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16));
  vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
  addl(pos, 16);
  subl(len, 16);
  jcc(Assembler::greaterEqual, L_16B_reduction_loop);

  bind(L_final_reduction_for_128);
  addl(len, 16);
  jcc(Assembler::equal, L_128_done);

  bind(L_get_last_two_xmms);
  movdqu(xmm2, xmm7);
  addl(pos, len);
  movdqu(xmm1, Address(buf, pos, Address::times_1, -16));
  subl(pos, len);

  // get rid of the extra data that was loaded before
  // load the shift constant
  lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
  movdqu(xmm0, Address(rax, len));
  addl(rax, len);

  vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
  // Change mask to 512
  vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2);
  vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit);

  blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit);
  vpclmulqdq(xmm8, xmm7, xmm10, 0x01);
  vpclmulqdq(xmm7,
xmm7, xmm10, 0x10); 8825 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 8826 vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit); 8827 8828 bind(L_128_done); 8829 // compute crc of a 128-bit value 8830 movdqu(xmm10, Address(table, 3 * 16)); 8831 movdqu(xmm0, xmm7); 8832 8833 // 64b fold 8834 vpclmulqdq(xmm7, xmm7, xmm10, 0x0); 8835 vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit); 8836 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8837 8838 // 32b fold 8839 movdqu(xmm0, xmm7); 8840 vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit); 8841 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 8842 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8843 jmp(L_barrett); 8844 8845 bind(L_less_than_256); 8846 kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup); 8847 8848 //barrett reduction 8849 bind(L_barrett); 8850 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2); 8851 movdqu(xmm1, xmm7); 8852 movdqu(xmm2, xmm7); 8853 movdqu(xmm10, Address(table, 4 * 16)); 8854 8855 pclmulqdq(xmm7, xmm10, 0x0); 8856 pxor(xmm7, xmm2); 8857 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2); 8858 movdqu(xmm2, xmm7); 8859 pclmulqdq(xmm7, xmm10, 0x10); 8860 pxor(xmm7, xmm2); 8861 pxor(xmm7, xmm1); 8862 pextrd(crc, xmm7, 2); 8863 8864 bind(L_cleanup); 8865 addptr(rsp, 16 * 2 + 8); 8866 pop(r12); 8867 } 8868 8869 // S. Gueron / Information Processing Letters 112 (2012) 184 8870 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table. 8871 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0]. 8872 // Output: the 64-bit carry-less product of B * CONST 8873 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n, 8874 Register tmp1, Register tmp2, Register tmp3) { 8875 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 8876 if (n > 0) { 8877 addq(tmp3, n * 256 * 8); 8878 } 8879 // Q1 = TABLEExt[n][B & 0xFF]; 8880 movl(tmp1, in); 8881 andl(tmp1, 0x000000FF); 8882 shll(tmp1, 3); 8883 addq(tmp1, tmp3); 8884 movq(tmp1, Address(tmp1, 0)); 8885 8886 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 8887 movl(tmp2, in); 8888 shrl(tmp2, 8); 8889 andl(tmp2, 0x000000FF); 8890 shll(tmp2, 3); 8891 addq(tmp2, tmp3); 8892 movq(tmp2, Address(tmp2, 0)); 8893 8894 shlq(tmp2, 8); 8895 xorq(tmp1, tmp2); 8896 8897 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 8898 movl(tmp2, in); 8899 shrl(tmp2, 16); 8900 andl(tmp2, 0x000000FF); 8901 shll(tmp2, 3); 8902 addq(tmp2, tmp3); 8903 movq(tmp2, Address(tmp2, 0)); 8904 8905 shlq(tmp2, 16); 8906 xorq(tmp1, tmp2); 8907 8908 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 8909 shrl(in, 24); 8910 andl(in, 0x000000FF); 8911 shll(in, 3); 8912 addq(in, tmp3); 8913 movq(in, Address(in, 0)); 8914 8915 shlq(in, 24); 8916 xorq(in, tmp1); 8917 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 8918 } 8919 8920 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 8921 Register in_out, 8922 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 8923 XMMRegister w_xtmp2, 8924 Register tmp1, 8925 Register n_tmp2, Register n_tmp3) { 8926 if (is_pclmulqdq_supported) { 8927 movdl(w_xtmp1, in_out); // modified blindly 8928 8929 movl(tmp1, const_or_pre_comp_const_index); 8930 movdl(w_xtmp2, tmp1); 8931 pclmulqdq(w_xtmp1, w_xtmp2, 0); 8932 8933 movdq(in_out, w_xtmp1); 8934 } else { 8935 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3); 8936 } 8937 } 8938 8939 // 
Recombination Alternative 2: No bit-reflections 8940 // T1 = (CRC_A * U1) << 1 8941 // T2 = (CRC_B * U2) << 1 8942 // C1 = T1 >> 32 8943 // C2 = T2 >> 32 8944 // T1 = T1 & 0xFFFFFFFF 8945 // T2 = T2 & 0xFFFFFFFF 8946 // T1 = CRC32(0, T1) 8947 // T2 = CRC32(0, T2) 8948 // C1 = C1 ^ T1 8949 // C2 = C2 ^ T2 8950 // CRC = C1 ^ C2 ^ CRC_C 8951 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 8952 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8953 Register tmp1, Register tmp2, 8954 Register n_tmp3) { 8955 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8956 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8957 shlq(in_out, 1); 8958 movl(tmp1, in_out); 8959 shrq(in_out, 32); 8960 xorl(tmp2, tmp2); 8961 crc32(tmp2, tmp1, 4); 8962 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here 8963 shlq(in1, 1); 8964 movl(tmp1, in1); 8965 shrq(in1, 32); 8966 xorl(tmp2, tmp2); 8967 crc32(tmp2, tmp1, 4); 8968 xorl(in1, tmp2); 8969 xorl(in_out, in1); 8970 xorl(in_out, in2); 8971 } 8972 8973 // Set N to predefined value 8974 // Subtract from a length of a buffer 8975 // execute in a loop: 8976 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0 8977 // for i = 1 to N do 8978 // CRC_A = CRC32(CRC_A, A[i]) 8979 // CRC_B = CRC32(CRC_B, B[i]) 8980 // CRC_C = CRC32(CRC_C, C[i]) 8981 // end for 8982 // Recombine 8983 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 8984 Register in_out1, Register in_out2, Register in_out3, 8985 Register tmp1, Register tmp2, Register tmp3, 8986 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8987 Register tmp4, Register tmp5, 8988 Register n_tmp6) { 8989 Label L_processPartitions; 8990 Label L_processPartition; 8991 Label L_exit; 8992 8993 bind(L_processPartitions); 8994 cmpl(in_out1, 3 * size); 8995 jcc(Assembler::less, L_exit); 8996 xorl(tmp1, tmp1); 8997 xorl(tmp2, tmp2); 8998 movq(tmp3, in_out2); 8999 addq(tmp3, size); 9000 9001 bind(L_processPartition); 9002 crc32(in_out3, Address(in_out2, 0), 8); 9003 crc32(tmp1, Address(in_out2, size), 8); 9004 crc32(tmp2, Address(in_out2, size * 2), 8); 9005 addq(in_out2, 8); 9006 cmpq(in_out2, tmp3); 9007 jcc(Assembler::less, L_processPartition); 9008 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 9009 w_xtmp1, w_xtmp2, w_xtmp3, 9010 tmp4, tmp5, 9011 n_tmp6); 9012 addq(in_out2, 2 * size); 9013 subl(in_out1, 3 * size); 9014 jmp(L_processPartitions); 9015 9016 bind(L_exit); 9017 } 9018 #else 9019 void MacroAssembler::crc32c_ipl_alg4(Register in_out, uint32_t n, 9020 Register tmp1, Register tmp2, Register tmp3, 9021 XMMRegister xtmp1, XMMRegister xtmp2) { 9022 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 9023 if (n > 0) { 9024 addl(tmp3, n * 256 * 8); 9025 } 9026 // Q1 = TABLEExt[n][B & 0xFF]; 9027 movl(tmp1, in_out); 9028 andl(tmp1, 0x000000FF); 9029 shll(tmp1, 3); 9030 addl(tmp1, tmp3); 9031 movq(xtmp1, Address(tmp1, 0)); 9032 9033 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 9034 movl(tmp2, in_out); 9035 shrl(tmp2, 8); 9036 andl(tmp2, 0x000000FF); 9037 shll(tmp2, 3); 9038 addl(tmp2, tmp3); 9039 movq(xtmp2, 
Address(tmp2, 0)); 9040 9041 psllq(xtmp2, 8); 9042 pxor(xtmp1, xtmp2); 9043 9044 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 9045 movl(tmp2, in_out); 9046 shrl(tmp2, 16); 9047 andl(tmp2, 0x000000FF); 9048 shll(tmp2, 3); 9049 addl(tmp2, tmp3); 9050 movq(xtmp2, Address(tmp2, 0)); 9051 9052 psllq(xtmp2, 16); 9053 pxor(xtmp1, xtmp2); 9054 9055 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 9056 shrl(in_out, 24); 9057 andl(in_out, 0x000000FF); 9058 shll(in_out, 3); 9059 addl(in_out, tmp3); 9060 movq(xtmp2, Address(in_out, 0)); 9061 9062 psllq(xtmp2, 24); 9063 pxor(xtmp1, xtmp2); // Result in CXMM 9064 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 9065 } 9066 9067 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 9068 Register in_out, 9069 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 9070 XMMRegister w_xtmp2, 9071 Register tmp1, 9072 Register n_tmp2, Register n_tmp3) { 9073 if (is_pclmulqdq_supported) { 9074 movdl(w_xtmp1, in_out); 9075 9076 movl(tmp1, const_or_pre_comp_const_index); 9077 movdl(w_xtmp2, tmp1); 9078 pclmulqdq(w_xtmp1, w_xtmp2, 0); 9079 // Keep result in XMM since GPR is 32 bit in length 9080 } else { 9081 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3, w_xtmp1, w_xtmp2); 9082 } 9083 } 9084 9085 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 9086 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9087 Register tmp1, Register tmp2, 9088 Register n_tmp3) { 9089 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 9090 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 9091 9092 psllq(w_xtmp1, 1); 9093 movdl(tmp1, w_xtmp1); 9094 psrlq(w_xtmp1, 32); 9095 movdl(in_out, w_xtmp1); 9096 9097 xorl(tmp2, tmp2); 9098 crc32(tmp2, tmp1, 4); 9099 xorl(in_out, tmp2); 9100 9101 psllq(w_xtmp2, 1); 9102 movdl(tmp1, w_xtmp2); 9103 psrlq(w_xtmp2, 32); 9104 movdl(in1, w_xtmp2); 9105 9106 xorl(tmp2, tmp2); 9107 crc32(tmp2, tmp1, 4); 9108 xorl(in1, tmp2); 9109 xorl(in_out, in1); 9110 xorl(in_out, in2); 9111 } 9112 9113 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 9114 Register in_out1, Register in_out2, Register in_out3, 9115 Register tmp1, Register tmp2, Register tmp3, 9116 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9117 Register tmp4, Register tmp5, 9118 Register n_tmp6) { 9119 Label L_processPartitions; 9120 Label L_processPartition; 9121 Label L_exit; 9122 9123 bind(L_processPartitions); 9124 cmpl(in_out1, 3 * size); 9125 jcc(Assembler::less, L_exit); 9126 xorl(tmp1, tmp1); 9127 xorl(tmp2, tmp2); 9128 movl(tmp3, in_out2); 9129 addl(tmp3, size); 9130 9131 bind(L_processPartition); 9132 crc32(in_out3, Address(in_out2, 0), 4); 9133 crc32(tmp1, Address(in_out2, size), 4); 9134 crc32(tmp2, Address(in_out2, size*2), 4); 9135 crc32(in_out3, Address(in_out2, 0+4), 4); 9136 crc32(tmp1, Address(in_out2, size+4), 4); 9137 crc32(tmp2, Address(in_out2, size*2+4), 4); 9138 addl(in_out2, 8); 9139 cmpl(in_out2, tmp3); 9140 jcc(Assembler::less, L_processPartition); 9141 9142 push(tmp3); 9143 push(in_out1); 9144 push(in_out2); 9145 tmp4 = tmp3; 9146 tmp5 = in_out1; 9147 n_tmp6 = in_out2; 9148 9149 
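  // Note: ia32 is short on general registers here, so tmp3/in_out1/in_out2
  // were saved on the stack above and are re-purposed as the tmp4/tmp5/n_tmp6
  // scratch arguments of the recombination step; the pops below restore them.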
  crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
                  w_xtmp1, w_xtmp2, w_xtmp3,
                  tmp4, tmp5,
                  n_tmp6);

  pop(in_out2);
  pop(in_out1);
  pop(tmp3);

  addl(in_out2, 2 * size);
  subl(in_out1, 3 * size);
  jmp(L_processPartitions);

  bind(L_exit);
}
#endif // LP64

#ifdef _LP64
// Algorithm 2: Pipelined usage of the CRC32 instruction.
// Input: A buffer I of L bytes.
// Output: the CRC32C value of the buffer.
// Notations:
// Write L = 24N + r, with N = floor(L/24).
// r = L mod 24 (0 <= r < 24).
// Consider I as the concatenation of A|B|C|R, where A, B, and C each consist
// of N quadwords, and R consists of r bytes.
// A[j] = I[8j+7:8j], j = 0, 1, ..., N-1
// B[j] = I[N + 8j+7:N + 8j], j = 0, 1, ..., N-1
// C[j] = I[2N + 8j+7:2N + 8j], j = 0, 1, ..., N-1
// if r > 0 R[j] = I[3N + j], j = 0, 1, ..., r-1
void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                                          Register tmp1, Register tmp2, Register tmp3,
                                          Register tmp4, Register tmp5, Register tmp6,
                                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                          bool is_pclmulqdq_supported) {
  uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
  Label L_wordByWord;
  Label L_byteByByteProlog;
  Label L_byteByByte;
  Label L_exit;

  if (is_pclmulqdq_supported) {
    const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
    const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1);

    const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
    const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);

    const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
    const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
    assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\"");
  } else {
    const_or_pre_comp_const_index[0] = 1;
    const_or_pre_comp_const_index[1] = 0;

    const_or_pre_comp_const_index[2] = 3;
    const_or_pre_comp_const_index[3] = 2;

    const_or_pre_comp_const_index[4] = 5;
    const_or_pre_comp_const_index[5] = 4;
  }
  crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  movl(tmp1, in2);
  andl(tmp1, 0x00000007);
  negl(tmp1);
  addl(tmp1, in2);
  addq(tmp1, in1);

  cmpq(in1, tmp1);
  jccb(Assembler::greaterEqual, L_byteByByteProlog);
  align(16);
  BIND(L_wordByWord);
  crc32(in_out, Address(in1, 0), 8);
  addq(in1, 8);
  cmpq(in1, tmp1);
jcc(Assembler::less, L_wordByWord); 9242 9243 BIND(L_byteByByteProlog); 9244 andl(in2, 0x00000007); 9245 movl(tmp2, 1); 9246 9247 cmpl(tmp2, in2); 9248 jccb(Assembler::greater, L_exit); 9249 BIND(L_byteByByte); 9250 crc32(in_out, Address(in1, 0), 1); 9251 incq(in1); 9252 incl(tmp2); 9253 cmpl(tmp2, in2); 9254 jcc(Assembler::lessEqual, L_byteByByte); 9255 9256 BIND(L_exit); 9257 } 9258 #else 9259 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 9260 Register tmp1, Register tmp2, Register tmp3, 9261 Register tmp4, Register tmp5, Register tmp6, 9262 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9263 bool is_pclmulqdq_supported) { 9264 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 9265 Label L_wordByWord; 9266 Label L_byteByByteProlog; 9267 Label L_byteByByte; 9268 Label L_exit; 9269 9270 if (is_pclmulqdq_supported) { 9271 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 9272 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1); 9273 9274 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 9275 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 9276 9277 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 9278 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 9279 } else { 9280 const_or_pre_comp_const_index[0] = 1; 9281 const_or_pre_comp_const_index[1] = 0; 9282 9283 const_or_pre_comp_const_index[2] = 3; 9284 const_or_pre_comp_const_index[3] = 2; 9285 9286 const_or_pre_comp_const_index[4] = 5; 9287 const_or_pre_comp_const_index[5] = 4; 9288 } 9289 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 9290 in2, in1, in_out, 9291 tmp1, tmp2, tmp3, 9292 w_xtmp1, w_xtmp2, w_xtmp3, 9293 tmp4, tmp5, 9294 tmp6); 9295 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 9296 in2, in1, in_out, 9297 tmp1, tmp2, tmp3, 9298 w_xtmp1, w_xtmp2, w_xtmp3, 9299 tmp4, tmp5, 9300 tmp6); 9301 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 9302 in2, in1, in_out, 9303 tmp1, tmp2, tmp3, 9304 w_xtmp1, w_xtmp2, w_xtmp3, 9305 tmp4, tmp5, 9306 tmp6); 9307 movl(tmp1, in2); 9308 andl(tmp1, 0x00000007); 9309 negl(tmp1); 9310 addl(tmp1, in2); 9311 addl(tmp1, in1); 9312 9313 BIND(L_wordByWord); 9314 cmpl(in1, tmp1); 9315 jcc(Assembler::greaterEqual, L_byteByByteProlog); 9316 crc32(in_out, Address(in1,0), 4); 9317 addl(in1, 4); 9318 jmp(L_wordByWord); 9319 9320 BIND(L_byteByByteProlog); 9321 andl(in2, 0x00000007); 9322 movl(tmp2, 1); 9323 9324 BIND(L_byteByByte); 9325 cmpl(tmp2, in2); 9326 jccb(Assembler::greater, L_exit); 9327 movb(tmp1, Address(in1, 0)); 9328 crc32(in_out, tmp1, 1); 9329 incl(in1); 9330 incl(tmp2); 9331 jmp(L_byteByByte); 9332 9333 BIND(L_exit); 9334 } 9335 #endif // LP64 9336 #undef BIND 9337 #undef BLOCK_COMMENT 9338 9339 // Compress char[] array to byte[]. 9340 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 9341 // Return the array length if every element in array can be encoded, 9342 // otherwise, the index of first non-latin1 (> 0xff) character. 
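// For example, compressing {'J', 'a', 'v', '\u0100'} writes the 3 latin1
// bytes and returns 3, while a fully latin1 input of length 4 returns 4.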
// @IntrinsicCandidate
// public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
//   for (int i = 0; i < len; i++) {
//     char c = src[srcOff];
//     if (c > 0xff) {
//       return i;  // return index of non-latin1 char
//     }
//     dst[dstOff] = (byte)c;
//     srcOff++;
//     dstOff++;
//   }
//   return len;
// }
void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
                                         XMMRegister tmp1Reg, XMMRegister tmp2Reg,
                                         XMMRegister tmp3Reg, XMMRegister tmp4Reg,
                                         Register tmp5, Register result, KRegister mask1, KRegister mask2) {
  Label copy_chars_loop, done, reset_sp, copy_tail;

  // rsi: src
  // rdi: dst
  // rdx: len
  // rcx: tmp5
  // rax: result

  // rsi holds start addr of source char[] to be compressed
  // rdi holds start addr of destination byte[]
  // rdx holds length

  assert(len != result, "");

  // save length for return
  movl(result, len);

  if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
      VM_Version::supports_avx512vlbw() &&
      VM_Version::supports_bmi2()) {

    Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail;

    // alignment
    Label post_alignment;

    // if length of the string is less than 32, handle it the old-fashioned way
    testl(len, -32);
    jcc(Assembler::zero, below_threshold);

    // First check whether a character is compressible (<= 0xFF).
    // Create mask to test for Unicode chars inside zmm vector
    movl(tmp5, 0x00FF);
    evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit);

    testl(len, -64);
    jccb(Assembler::zero, post_alignment);

    movl(tmp5, dst);
    andl(tmp5, (32 - 1));
    negl(tmp5);
    andl(tmp5, (32 - 1));

    // bail out when there is nothing to be done
    testl(tmp5, 0xFFFFFFFF);
    jccb(Assembler::zero, post_alignment);

    // ~(~0 << len), where len is the # of remaining elements to process
    movl(len, 0xFFFFFFFF);
    shlxl(len, len, tmp5);
    notl(len);
    kmovdl(mask2, len);
    movl(len, result);

    evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit);
    evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit);
    ktestd(mask1, mask2);
    jcc(Assembler::carryClear, copy_tail);

    evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit);

    addptr(src, tmp5);
    addptr(src, tmp5); // advance src by 2 * tmp5: source chars are two bytes wide
    addptr(dst, tmp5);
    subl(len, tmp5);

    bind(post_alignment);
    // end of alignment

    movl(tmp5, len);
    andl(tmp5, (32 - 1)); // tail count (in chars)
    andl(len, ~(32 - 1)); // vector count (in chars)
    jccb(Assembler::zero, copy_loop_tail);

    lea(src, Address(src, len, Address::times_2));
    lea(dst, Address(dst, len, Address::times_1));
    negptr(len);

    bind(copy_32_loop);
    evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
    evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
    kortestdl(mask1, mask1);
    jccb(Assembler::carryClear, reset_for_copy_tail);

    // All elements in the current processed chunk are valid candidates for
    // compression. Write truncated byte elements to memory.
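    // (VPMOVWB narrows each word element to its low byte by plain truncation,
    // e.g. 0x0041 -> 0x41; that is lossless here because the compare above
    // proved every element is <= 0xFF.)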
9446 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit); 9447 addptr(len, 32); 9448 jccb(Assembler::notZero, copy_32_loop); 9449 9450 bind(copy_loop_tail); 9451 // bail out when there is nothing to be done 9452 testl(tmp5, 0xFFFFFFFF); 9453 jcc(Assembler::zero, done); 9454 9455 movl(len, tmp5); 9456 9457 // ~(~0 << len), where len is the # of remaining elements to process 9458 movl(tmp5, 0xFFFFFFFF); 9459 shlxl(tmp5, tmp5, len); 9460 notl(tmp5); 9461 9462 kmovdl(mask2, tmp5); 9463 9464 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 9465 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 9466 ktestd(mask1, mask2); 9467 jcc(Assembler::carryClear, copy_tail); 9468 9469 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 9470 jmp(done); 9471 9472 bind(reset_for_copy_tail); 9473 lea(src, Address(src, tmp5, Address::times_2)); 9474 lea(dst, Address(dst, tmp5, Address::times_1)); 9475 subptr(len, tmp5); 9476 jmp(copy_chars_loop); 9477 9478 bind(below_threshold); 9479 } 9480 9481 if (UseSSE42Intrinsics) { 9482 Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail; 9483 9484 // vectored compression 9485 testl(len, 0xfffffff8); 9486 jcc(Assembler::zero, copy_tail); 9487 9488 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors 9489 movdl(tmp1Reg, tmp5); 9490 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 9491 9492 andl(len, 0xfffffff0); 9493 jccb(Assembler::zero, copy_16); 9494 9495 // compress 16 chars per iter 9496 pxor(tmp4Reg, tmp4Reg); 9497 9498 lea(src, Address(src, len, Address::times_2)); 9499 lea(dst, Address(dst, len, Address::times_1)); 9500 negptr(len); 9501 9502 bind(copy_32_loop); 9503 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters 9504 por(tmp4Reg, tmp2Reg); 9505 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters 9506 por(tmp4Reg, tmp3Reg); 9507 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector 9508 jccb(Assembler::notZero, reset_for_copy_tail); 9509 packuswb(tmp2Reg, tmp3Reg); // only ASCII chars; compress each to 1 byte 9510 movdqu(Address(dst, len, Address::times_1), tmp2Reg); 9511 addptr(len, 16); 9512 jccb(Assembler::notZero, copy_32_loop); 9513 9514 // compress next vector of 8 chars (if any) 9515 bind(copy_16); 9516 // len = 0 9517 testl(result, 0x00000008); // check if there's a block of 8 chars to compress 9518 jccb(Assembler::zero, copy_tail_sse); 9519 9520 pxor(tmp3Reg, tmp3Reg); 9521 9522 movdqu(tmp2Reg, Address(src, 0)); 9523 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector 9524 jccb(Assembler::notZero, reset_for_copy_tail); 9525 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte 9526 movq(Address(dst, 0), tmp2Reg); 9527 addptr(src, 16); 9528 addptr(dst, 8); 9529 jmpb(copy_tail_sse); 9530 9531 bind(reset_for_copy_tail); 9532 movl(tmp5, result); 9533 andl(tmp5, 0x0000000f); 9534 lea(src, Address(src, tmp5, Address::times_2)); 9535 lea(dst, Address(dst, tmp5, Address::times_1)); 9536 subptr(len, tmp5); 9537 jmpb(copy_chars_loop); 9538 9539 bind(copy_tail_sse); 9540 movl(len, result); 9541 andl(len, 0x00000007); // tail count (in chars) 9542 } 9543 // compress 1 char per iter 9544 bind(copy_tail); 9545 testl(len, len); 9546 jccb(Assembler::zero, done); 9547 lea(src, Address(src, len, Address::times_2)); 9548 lea(dst, Address(dst, len, Address::times_1)); 9549 negptr(len); 9550 9551 
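  // The scalar loop below uses the negative-index idiom: src/dst are advanced
  // to the end of the data, len is negated, and elements are addressed as
  // [base + len*scale]; increment(len) then both advances the index and sets
  // ZF exactly when the tail is done. In rough C terms (illustrative only):
  //
  //   for (ptrdiff_t i = -len; i != 0; i++) {
  //     jchar c = src_end[i];        // src_end == src + len (chars)
  //     if (c > 0xff) break;         // non-latin1: adjust result and stop
  //     dst_end[i] = (jbyte)c;       // dst_end == dst + len (bytes)
  //   }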
bind(copy_chars_loop); 9552 load_unsigned_short(tmp5, Address(src, len, Address::times_2)); 9553 testl(tmp5, 0xff00); // check if Unicode char 9554 jccb(Assembler::notZero, reset_sp); 9555 movb(Address(dst, len, Address::times_1), tmp5); // ASCII char; compress to 1 byte 9556 increment(len); 9557 jccb(Assembler::notZero, copy_chars_loop); 9558 9559 // add len then return (len will be zero if compress succeeded, otherwise negative) 9560 bind(reset_sp); 9561 addl(result, len); 9562 9563 bind(done); 9564 } 9565 9566 // Inflate byte[] array to char[]. 9567 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java 9568 // @IntrinsicCandidate 9569 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) { 9570 // for (int i = 0; i < len; i++) { 9571 // dst[dstOff++] = (char)(src[srcOff++] & 0xff); 9572 // } 9573 // } 9574 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 9575 XMMRegister tmp1, Register tmp2, KRegister mask) { 9576 Label copy_chars_loop, done, below_threshold, avx3_threshold; 9577 // rsi: src 9578 // rdi: dst 9579 // rdx: len 9580 // rcx: tmp2 9581 9582 // rsi holds start addr of source byte[] to be inflated 9583 // rdi holds start addr of destination char[] 9584 // rdx holds length 9585 assert_different_registers(src, dst, len, tmp2); 9586 movl(tmp2, len); 9587 if ((UseAVX > 2) && // AVX512 9588 VM_Version::supports_avx512vlbw() && 9589 VM_Version::supports_bmi2()) { 9590 9591 Label copy_32_loop, copy_tail; 9592 Register tmp3_aliased = len; 9593 9594 // if length of the string is less than 16, handle it in an old fashioned way 9595 testl(len, -16); 9596 jcc(Assembler::zero, below_threshold); 9597 9598 testl(len, -1 * AVX3Threshold); 9599 jcc(Assembler::zero, avx3_threshold); 9600 9601 // In order to use only one arithmetic operation for the main loop we use 9602 // this pre-calculation 9603 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop 9604 andl(len, -32); // vector count 9605 jccb(Assembler::zero, copy_tail); 9606 9607 lea(src, Address(src, len, Address::times_1)); 9608 lea(dst, Address(dst, len, Address::times_2)); 9609 negptr(len); 9610 9611 9612 // inflate 32 chars per iter 9613 bind(copy_32_loop); 9614 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit); 9615 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit); 9616 addptr(len, 32); 9617 jcc(Assembler::notZero, copy_32_loop); 9618 9619 bind(copy_tail); 9620 // bail out when there is nothing to be done 9621 testl(tmp2, -1); // we don't destroy the contents of tmp2 here 9622 jcc(Assembler::zero, done); 9623 9624 // ~(~0 << length), where length is the # of remaining elements to process 9625 movl(tmp3_aliased, -1); 9626 shlxl(tmp3_aliased, tmp3_aliased, tmp2); 9627 notl(tmp3_aliased); 9628 kmovdl(mask, tmp3_aliased); 9629 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit); 9630 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit); 9631 9632 jmp(done); 9633 bind(avx3_threshold); 9634 } 9635 if (UseSSE42Intrinsics) { 9636 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail; 9637 9638 if (UseAVX > 1) { 9639 andl(tmp2, (16 - 1)); 9640 andl(len, -16); 9641 jccb(Assembler::zero, copy_new_tail); 9642 } else { 9643 andl(tmp2, 0x00000007); // tail count (in chars) 9644 andl(len, 0xfffffff8); // vector count (in chars) 9645 jccb(Assembler::zero, copy_tail); 9646 } 9647 9648 // vectored inflation 9649 lea(src, Address(src, len, 
Address::times_1)); 9650 lea(dst, Address(dst, len, Address::times_2)); 9651 negptr(len); 9652 9653 if (UseAVX > 1) { 9654 bind(copy_16_loop); 9655 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit); 9656 vmovdqu(Address(dst, len, Address::times_2), tmp1); 9657 addptr(len, 16); 9658 jcc(Assembler::notZero, copy_16_loop); 9659 9660 bind(below_threshold); 9661 bind(copy_new_tail); 9662 movl(len, tmp2); 9663 andl(tmp2, 0x00000007); 9664 andl(len, 0xFFFFFFF8); 9665 jccb(Assembler::zero, copy_tail); 9666 9667 pmovzxbw(tmp1, Address(src, 0)); 9668 movdqu(Address(dst, 0), tmp1); 9669 addptr(src, 8); 9670 addptr(dst, 2 * 8); 9671 9672 jmp(copy_tail, true); 9673 } 9674 9675 // inflate 8 chars per iter 9676 bind(copy_8_loop); 9677 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words 9678 movdqu(Address(dst, len, Address::times_2), tmp1); 9679 addptr(len, 8); 9680 jcc(Assembler::notZero, copy_8_loop); 9681 9682 bind(copy_tail); 9683 movl(len, tmp2); 9684 9685 cmpl(len, 4); 9686 jccb(Assembler::less, copy_bytes); 9687 9688 movdl(tmp1, Address(src, 0)); // load 4 byte chars 9689 pmovzxbw(tmp1, tmp1); 9690 movq(Address(dst, 0), tmp1); 9691 subptr(len, 4); 9692 addptr(src, 4); 9693 addptr(dst, 8); 9694 9695 bind(copy_bytes); 9696 } else { 9697 bind(below_threshold); 9698 } 9699 9700 testl(len, len); 9701 jccb(Assembler::zero, done); 9702 lea(src, Address(src, len, Address::times_1)); 9703 lea(dst, Address(dst, len, Address::times_2)); 9704 negptr(len); 9705 9706 // inflate 1 char per iter 9707 bind(copy_chars_loop); 9708 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char 9709 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word 9710 increment(len); 9711 jcc(Assembler::notZero, copy_chars_loop); 9712 9713 bind(done); 9714 } 9715 9716 9717 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) { 9718 switch(type) { 9719 case T_BYTE: 9720 case T_BOOLEAN: 9721 evmovdqub(dst, kmask, src, merge, vector_len); 9722 break; 9723 case T_CHAR: 9724 case T_SHORT: 9725 evmovdquw(dst, kmask, src, merge, vector_len); 9726 break; 9727 case T_INT: 9728 case T_FLOAT: 9729 evmovdqul(dst, kmask, src, merge, vector_len); 9730 break; 9731 case T_LONG: 9732 case T_DOUBLE: 9733 evmovdquq(dst, kmask, src, merge, vector_len); 9734 break; 9735 default: 9736 fatal("Unexpected type argument %s", type2name(type)); 9737 break; 9738 } 9739 } 9740 9741 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) { 9742 switch(type) { 9743 case T_BYTE: 9744 case T_BOOLEAN: 9745 evmovdqub(dst, kmask, src, merge, vector_len); 9746 break; 9747 case T_CHAR: 9748 case T_SHORT: 9749 evmovdquw(dst, kmask, src, merge, vector_len); 9750 break; 9751 case T_INT: 9752 case T_FLOAT: 9753 evmovdqul(dst, kmask, src, merge, vector_len); 9754 break; 9755 case T_LONG: 9756 case T_DOUBLE: 9757 evmovdquq(dst, kmask, src, merge, vector_len); 9758 break; 9759 default: 9760 fatal("Unexpected type argument %s", type2name(type)); 9761 break; 9762 } 9763 } 9764 9765 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) { 9766 switch(masklen) { 9767 case 2: 9768 knotbl(dst, src); 9769 movl(rtmp, 3); 9770 kmovbl(ktmp, rtmp); 9771 kandbl(dst, ktmp, dst); 9772 break; 9773 case 4: 9774 knotbl(dst, src); 9775 movl(rtmp, 15); 9776 kmovbl(ktmp, rtmp); 9777 kandbl(dst, ktmp, dst); 9778 break; 9779 case 
8: 9780 knotbl(dst, src); 9781 break; 9782 case 16: 9783 knotwl(dst, src); 9784 break; 9785 case 32: 9786 knotdl(dst, src); 9787 break; 9788 case 64: 9789 knotql(dst, src); 9790 break; 9791 default: 9792 fatal("Unexpected vector length %d", masklen); 9793 break; 9794 } 9795 } 9796 9797 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9798 switch(type) { 9799 case T_BOOLEAN: 9800 case T_BYTE: 9801 kandbl(dst, src1, src2); 9802 break; 9803 case T_CHAR: 9804 case T_SHORT: 9805 kandwl(dst, src1, src2); 9806 break; 9807 case T_INT: 9808 case T_FLOAT: 9809 kanddl(dst, src1, src2); 9810 break; 9811 case T_LONG: 9812 case T_DOUBLE: 9813 kandql(dst, src1, src2); 9814 break; 9815 default: 9816 fatal("Unexpected type argument %s", type2name(type)); 9817 break; 9818 } 9819 } 9820 9821 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9822 switch(type) { 9823 case T_BOOLEAN: 9824 case T_BYTE: 9825 korbl(dst, src1, src2); 9826 break; 9827 case T_CHAR: 9828 case T_SHORT: 9829 korwl(dst, src1, src2); 9830 break; 9831 case T_INT: 9832 case T_FLOAT: 9833 kordl(dst, src1, src2); 9834 break; 9835 case T_LONG: 9836 case T_DOUBLE: 9837 korql(dst, src1, src2); 9838 break; 9839 default: 9840 fatal("Unexpected type argument %s", type2name(type)); 9841 break; 9842 } 9843 } 9844 9845 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9846 switch(type) { 9847 case T_BOOLEAN: 9848 case T_BYTE: 9849 kxorbl(dst, src1, src2); 9850 break; 9851 case T_CHAR: 9852 case T_SHORT: 9853 kxorwl(dst, src1, src2); 9854 break; 9855 case T_INT: 9856 case T_FLOAT: 9857 kxordl(dst, src1, src2); 9858 break; 9859 case T_LONG: 9860 case T_DOUBLE: 9861 kxorql(dst, src1, src2); 9862 break; 9863 default: 9864 fatal("Unexpected type argument %s", type2name(type)); 9865 break; 9866 } 9867 } 9868 9869 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9870 switch(type) { 9871 case T_BOOLEAN: 9872 case T_BYTE: 9873 evpermb(dst, mask, nds, src, merge, vector_len); break; 9874 case T_CHAR: 9875 case T_SHORT: 9876 evpermw(dst, mask, nds, src, merge, vector_len); break; 9877 case T_INT: 9878 case T_FLOAT: 9879 evpermd(dst, mask, nds, src, merge, vector_len); break; 9880 case T_LONG: 9881 case T_DOUBLE: 9882 evpermq(dst, mask, nds, src, merge, vector_len); break; 9883 default: 9884 fatal("Unexpected type argument %s", type2name(type)); break; 9885 } 9886 } 9887 9888 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9889 switch(type) { 9890 case T_BOOLEAN: 9891 case T_BYTE: 9892 evpermb(dst, mask, nds, src, merge, vector_len); break; 9893 case T_CHAR: 9894 case T_SHORT: 9895 evpermw(dst, mask, nds, src, merge, vector_len); break; 9896 case T_INT: 9897 case T_FLOAT: 9898 evpermd(dst, mask, nds, src, merge, vector_len); break; 9899 case T_LONG: 9900 case T_DOUBLE: 9901 evpermq(dst, mask, nds, src, merge, vector_len); break; 9902 default: 9903 fatal("Unexpected type argument %s", type2name(type)); break; 9904 } 9905 } 9906 9907 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9908 switch(type) { 9909 case T_BYTE: 9910 evpminsb(dst, mask, nds, src, merge, vector_len); break; 9911 case T_SHORT: 9912 evpminsw(dst, mask, nds, src, merge, vector_len); break; 9913 case T_INT: 
9914 evpminsd(dst, mask, nds, src, merge, vector_len); break; 9915 case T_LONG: 9916 evpminsq(dst, mask, nds, src, merge, vector_len); break; 9917 default: 9918 fatal("Unexpected type argument %s", type2name(type)); break; 9919 } 9920 } 9921 9922 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9923 switch(type) { 9924 case T_BYTE: 9925 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 9926 case T_SHORT: 9927 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 9928 case T_INT: 9929 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 9930 case T_LONG: 9931 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 9932 default: 9933 fatal("Unexpected type argument %s", type2name(type)); break; 9934 } 9935 } 9936 9937 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9938 switch(type) { 9939 case T_BYTE: 9940 evpminsb(dst, mask, nds, src, merge, vector_len); break; 9941 case T_SHORT: 9942 evpminsw(dst, mask, nds, src, merge, vector_len); break; 9943 case T_INT: 9944 evpminsd(dst, mask, nds, src, merge, vector_len); break; 9945 case T_LONG: 9946 evpminsq(dst, mask, nds, src, merge, vector_len); break; 9947 default: 9948 fatal("Unexpected type argument %s", type2name(type)); break; 9949 } 9950 } 9951 9952 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9953 switch(type) { 9954 case T_BYTE: 9955 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 9956 case T_SHORT: 9957 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 9958 case T_INT: 9959 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 9960 case T_LONG: 9961 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 9962 default: 9963 fatal("Unexpected type argument %s", type2name(type)); break; 9964 } 9965 } 9966 9967 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9968 switch(type) { 9969 case T_INT: 9970 evpxord(dst, mask, nds, src, merge, vector_len); break; 9971 case T_LONG: 9972 evpxorq(dst, mask, nds, src, merge, vector_len); break; 9973 default: 9974 fatal("Unexpected type argument %s", type2name(type)); break; 9975 } 9976 } 9977 9978 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9979 switch(type) { 9980 case T_INT: 9981 evpxord(dst, mask, nds, src, merge, vector_len); break; 9982 case T_LONG: 9983 evpxorq(dst, mask, nds, src, merge, vector_len); break; 9984 default: 9985 fatal("Unexpected type argument %s", type2name(type)); break; 9986 } 9987 } 9988 9989 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9990 switch(type) { 9991 case T_INT: 9992 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 9993 case T_LONG: 9994 evporq(dst, mask, nds, src, merge, vector_len); break; 9995 default: 9996 fatal("Unexpected type argument %s", type2name(type)); break; 9997 } 9998 } 9999 10000 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10001 switch(type) { 10002 case T_INT: 10003 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 10004 case T_LONG: 10005 
evporq(dst, mask, nds, src, merge, vector_len); break; 10006 default: 10007 fatal("Unexpected type argument %s", type2name(type)); break; 10008 } 10009 } 10010 10011 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10012 switch(type) { 10013 case T_INT: 10014 evpandd(dst, mask, nds, src, merge, vector_len); break; 10015 case T_LONG: 10016 evpandq(dst, mask, nds, src, merge, vector_len); break; 10017 default: 10018 fatal("Unexpected type argument %s", type2name(type)); break; 10019 } 10020 } 10021 10022 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10023 switch(type) { 10024 case T_INT: 10025 evpandd(dst, mask, nds, src, merge, vector_len); break; 10026 case T_LONG: 10027 evpandq(dst, mask, nds, src, merge, vector_len); break; 10028 default: 10029 fatal("Unexpected type argument %s", type2name(type)); break; 10030 } 10031 } 10032 10033 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) { 10034 switch(masklen) { 10035 case 8: 10036 kortestbl(src1, src2); 10037 break; 10038 case 16: 10039 kortestwl(src1, src2); 10040 break; 10041 case 32: 10042 kortestdl(src1, src2); 10043 break; 10044 case 64: 10045 kortestql(src1, src2); 10046 break; 10047 default: 10048 fatal("Unexpected mask length %d", masklen); 10049 break; 10050 } 10051 } 10052 10053 10054 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) { 10055 switch(masklen) { 10056 case 8: 10057 ktestbl(src1, src2); 10058 break; 10059 case 16: 10060 ktestwl(src1, src2); 10061 break; 10062 case 32: 10063 ktestdl(src1, src2); 10064 break; 10065 case 64: 10066 ktestql(src1, src2); 10067 break; 10068 default: 10069 fatal("Unexpected mask length %d", masklen); 10070 break; 10071 } 10072 } 10073 10074 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 10075 switch(type) { 10076 case T_INT: 10077 evprold(dst, mask, src, shift, merge, vlen_enc); break; 10078 case T_LONG: 10079 evprolq(dst, mask, src, shift, merge, vlen_enc); break; 10080 default: 10081 fatal("Unexpected type argument %s", type2name(type)); break; 10082 break; 10083 } 10084 } 10085 10086 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 10087 switch(type) { 10088 case T_INT: 10089 evprord(dst, mask, src, shift, merge, vlen_enc); break; 10090 case T_LONG: 10091 evprorq(dst, mask, src, shift, merge, vlen_enc); break; 10092 default: 10093 fatal("Unexpected type argument %s", type2name(type)); break; 10094 } 10095 } 10096 10097 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 10098 switch(type) { 10099 case T_INT: 10100 evprolvd(dst, mask, src1, src2, merge, vlen_enc); break; 10101 case T_LONG: 10102 evprolvq(dst, mask, src1, src2, merge, vlen_enc); break; 10103 default: 10104 fatal("Unexpected type argument %s", type2name(type)); break; 10105 } 10106 } 10107 10108 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 10109 switch(type) { 10110 case T_INT: 10111 evprorvd(dst, mask, src1, src2, merge, vlen_enc); break; 10112 case T_LONG: 10113 evprorvq(dst, mask, src1, src2, merge, vlen_enc); break; 10114 default: 10115 
fatal("Unexpected type argument %s", type2name(type)); break; 10116 } 10117 } 10118 10119 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 10120 assert(rscratch != noreg || always_reachable(src), "missing"); 10121 10122 if (reachable(src)) { 10123 evpandq(dst, nds, as_Address(src), vector_len); 10124 } else { 10125 lea(rscratch, src); 10126 evpandq(dst, nds, Address(rscratch, 0), vector_len); 10127 } 10128 } 10129 10130 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 10131 assert(rscratch != noreg || always_reachable(src), "missing"); 10132 10133 if (reachable(src)) { 10134 Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len); 10135 } else { 10136 lea(rscratch, src); 10137 Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 10138 } 10139 } 10140 10141 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 10142 assert(rscratch != noreg || always_reachable(src), "missing"); 10143 10144 if (reachable(src)) { 10145 evporq(dst, nds, as_Address(src), vector_len); 10146 } else { 10147 lea(rscratch, src); 10148 evporq(dst, nds, Address(rscratch, 0), vector_len); 10149 } 10150 } 10151 10152 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 10153 assert(rscratch != noreg || always_reachable(src), "missing"); 10154 10155 if (reachable(src)) { 10156 vpshufb(dst, nds, as_Address(src), vector_len); 10157 } else { 10158 lea(rscratch, src); 10159 vpshufb(dst, nds, Address(rscratch, 0), vector_len); 10160 } 10161 } 10162 10163 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) { 10164 assert(rscratch != noreg || always_reachable(src3), "missing"); 10165 10166 if (reachable(src3)) { 10167 vpternlogq(dst, imm8, src2, as_Address(src3), vector_len); 10168 } else { 10169 lea(rscratch, src3); 10170 vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len); 10171 } 10172 } 10173 10174 #if COMPILER2_OR_JVMCI 10175 10176 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 10177 Register length, Register temp, int vec_enc) { 10178 // Computing mask for predicated vector store. 10179 movptr(temp, -1); 10180 bzhiq(temp, temp, length); 10181 kmov(mask, temp); 10182 evmovdqu(bt, mask, dst, xmm, true, vec_enc); 10183 } 10184 10185 // Set memory operation for length "less than" 64 bytes. 
10186 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp, 10187 XMMRegister xmm, KRegister mask, Register length, 10188 Register temp, bool use64byteVector) { 10189 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10190 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 10191 if (!use64byteVector) { 10192 fill32(dst, disp, xmm); 10193 subptr(length, 32 >> shift); 10194 fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp); 10195 } else { 10196 assert(MaxVectorSize == 64, "vector length != 64"); 10197 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit); 10198 } 10199 } 10200 10201 10202 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp, 10203 XMMRegister xmm, KRegister mask, Register length, 10204 Register temp) { 10205 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10206 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 10207 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit); 10208 } 10209 10210 10211 void MacroAssembler::fill32(Address dst, XMMRegister xmm) { 10212 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10213 vmovdqu(dst, xmm); 10214 } 10215 10216 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) { 10217 fill32(Address(dst, disp), xmm); 10218 } 10219 10220 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) { 10221 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10222 if (!use64byteVector) { 10223 fill32(dst, xmm); 10224 fill32(dst.plus_disp(32), xmm); 10225 } else { 10226 evmovdquq(dst, xmm, Assembler::AVX_512bit); 10227 } 10228 } 10229 10230 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) { 10231 fill64(Address(dst, disp), xmm, use64byteVector); 10232 } 10233 10234 #ifdef _LP64 10235 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value, 10236 Register count, Register rtmp, XMMRegister xtmp) { 10237 Label L_exit; 10238 Label L_fill_start; 10239 Label L_fill_64_bytes; 10240 Label L_fill_96_bytes; 10241 Label L_fill_128_bytes; 10242 Label L_fill_128_bytes_loop; 10243 Label L_fill_128_loop_header; 10244 Label L_fill_128_bytes_loop_header; 10245 Label L_fill_128_bytes_loop_pre_header; 10246 Label L_fill_zmm_sequence; 10247 10248 int shift = -1; 10249 int avx3threshold = VM_Version::avx3_threshold(); 10250 switch(type) { 10251 case T_BYTE: shift = 0; 10252 break; 10253 case T_SHORT: shift = 1; 10254 break; 10255 case T_INT: shift = 2; 10256 break; 10257 /* Uncomment when LONG fill stubs are supported. 
#ifdef _LP64
void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value,
                                        Register count, Register rtmp, XMMRegister xtmp) {
  Label L_exit;
  Label L_fill_start;
  Label L_fill_64_bytes;
  Label L_fill_96_bytes;
  Label L_fill_128_bytes;
  Label L_fill_128_bytes_loop;
  Label L_fill_128_loop_header;
  Label L_fill_128_bytes_loop_header;
  Label L_fill_128_bytes_loop_pre_header;
  Label L_fill_zmm_sequence;

  int shift = -1;
  int avx3threshold = VM_Version::avx3_threshold();
  switch(type) {
    case T_BYTE:  shift = 0;
      break;
    case T_SHORT: shift = 1;
      break;
    case T_INT:   shift = 2;
      break;
    /* Uncomment when LONG fill stubs are supported.
    case T_LONG:  shift = 3;
      break;
    */
    default:
      fatal("Unhandled type: %s\n", type2name(type));
  }

  if ((avx3threshold != 0) || (MaxVectorSize == 32)) {

    if (MaxVectorSize == 64) {
      cmpq(count, avx3threshold >> shift);
      jcc(Assembler::greater, L_fill_zmm_sequence);
    }

    evpbroadcast(type, xtmp, value, Assembler::AVX_256bit);

    bind(L_fill_start);

    cmpq(count, 32 >> shift);
    jccb(Assembler::greater, L_fill_64_bytes);
    fill32_masked(shift, to, 0, xtmp, k2, count, rtmp);
    jmp(L_exit);

    bind(L_fill_64_bytes);
    cmpq(count, 64 >> shift);
    jccb(Assembler::greater, L_fill_96_bytes);
    fill64_masked(shift, to, 0, xtmp, k2, count, rtmp);
    jmp(L_exit);

    bind(L_fill_96_bytes);
    cmpq(count, 96 >> shift);
    jccb(Assembler::greater, L_fill_128_bytes);
    fill64(to, 0, xtmp);
    subq(count, 64 >> shift);
    fill32_masked(shift, to, 64, xtmp, k2, count, rtmp);
    jmp(L_exit);

    bind(L_fill_128_bytes);
    cmpq(count, 128 >> shift);
    jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header);
    fill64(to, 0, xtmp);
    fill32(to, 64, xtmp);
    subq(count, 96 >> shift);
    fill32_masked(shift, to, 96, xtmp, k2, count, rtmp);
    jmp(L_exit);

    bind(L_fill_128_bytes_loop_pre_header);
    {
      // Align the destination to a 32-byte boundary with a masked byte fill.
      mov(rtmp, to);
      andq(rtmp, 31);
      jccb(Assembler::zero, L_fill_128_bytes_loop_header);
      negq(rtmp);
      addq(rtmp, 32);
      mov64(r8, -1L);
      bzhiq(r8, r8, rtmp);
      kmovql(k2, r8);
      evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit);
      addq(to, rtmp);
      shrq(rtmp, shift);
      subq(count, rtmp);
    }

    cmpq(count, 128 >> shift);
    jcc(Assembler::less, L_fill_start);

    bind(L_fill_128_bytes_loop_header);
    subq(count, 128 >> shift);

    align32();
    bind(L_fill_128_bytes_loop);
    fill64(to, 0, xtmp);
    fill64(to, 64, xtmp);
    addq(to, 128);
    subq(count, 128 >> shift);
    jccb(Assembler::greaterEqual, L_fill_128_bytes_loop);

    addq(count, 128 >> shift);
    jcc(Assembler::zero, L_exit);
    jmp(L_fill_start);
  }
  if (MaxVectorSize == 64) {
    // Sequence using 64 byte ZMM register.
    Label L_fill_128_bytes_zmm;
    Label L_fill_192_bytes_zmm;
    Label L_fill_192_bytes_loop_zmm;
    Label L_fill_192_bytes_loop_header_zmm;
    Label L_fill_192_bytes_loop_pre_header_zmm;
    Label L_fill_start_zmm_sequence;

    bind(L_fill_zmm_sequence);
    evpbroadcast(type, xtmp, value, Assembler::AVX_512bit);

    bind(L_fill_start_zmm_sequence);
    cmpq(count, 64 >> shift);
    jccb(Assembler::greater, L_fill_128_bytes_zmm);
    fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true);
    jmp(L_exit);

    bind(L_fill_128_bytes_zmm);
    cmpq(count, 128 >> shift);
    jccb(Assembler::greater, L_fill_192_bytes_zmm);
    fill64(to, 0, xtmp, true);
    subq(count, 64 >> shift);
    fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true);
    jmp(L_exit);

    bind(L_fill_192_bytes_zmm);
    cmpq(count, 192 >> shift);
    jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm);
    fill64(to, 0, xtmp, true);
    fill64(to, 64, xtmp, true);
    subq(count, 128 >> shift);
    fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true);
    jmp(L_exit);

    bind(L_fill_192_bytes_loop_pre_header_zmm);
    {
      // Align the destination to a 64-byte boundary with a masked byte fill.
      movq(rtmp, to);
      andq(rtmp, 63);
      jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm);
      negq(rtmp);
      addq(rtmp, 64);
      mov64(r8, -1L);
      bzhiq(r8, r8, rtmp);
      kmovql(k2, r8);
      evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit);
      addq(to, rtmp);
      shrq(rtmp, shift);
      subq(count, rtmp);
    }

    cmpq(count, 192 >> shift);
    jcc(Assembler::less, L_fill_start_zmm_sequence);

    bind(L_fill_192_bytes_loop_header_zmm);
    subq(count, 192 >> shift);

    align32();
    bind(L_fill_192_bytes_loop_zmm);
    fill64(to, 0, xtmp, true);
    fill64(to, 64, xtmp, true);
    fill64(to, 128, xtmp, true);
    addq(to, 192);
    subq(count, 192 >> shift);
    jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm);

    addq(count, 192 >> shift);
    jcc(Assembler::zero, L_exit);
    jmp(L_fill_start_zmm_sequence);
  }
  bind(L_exit);
}
#endif
#endif //COMPILER2_OR_JVMCI


#ifdef _LP64
void MacroAssembler::convert_f2i(Register dst, XMMRegister src) {
  Label done;
  cvttss2sil(dst, src);
  // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
  cmpl(dst, 0x80000000); // float_sign_flip
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movflt(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup())));
  pop(dst);
  bind(done);
}

void MacroAssembler::convert_d2i(Register dst, XMMRegister src) {
  Label done;
  cvttsd2sil(dst, src);
  // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
  cmpl(dst, 0x80000000); // float_sign_flip
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movdbl(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup())));
  pop(dst);
  bind(done);
}
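// The conversions above and below share one pattern: the truncating cvtt*
// instructions return the x86 "integer indefinite" value (0x80000000 for
// int, 0x8000000000000000 for long) on overflow or NaN. Only when that
// sentinel comes back do we spill the source operand and call the fixup
// stub, which produces the JLS-mandated result (0 for NaN, saturation to
// MIN_VALUE/MAX_VALUE for out-of-range inputs).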
void MacroAssembler::convert_f2l(Register dst, XMMRegister src) {
  Label done;
  cvttss2siq(dst, src);
  cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movflt(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup())));
  pop(dst);
  bind(done);
}

void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the
  // java.lang.Math.round(float) algorithm; refer to it for details.
  const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000;
  const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24;
  const int32_t FloatConsts_EXP_BIAS = 127;
  const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF;
  const int32_t MINUS_32 = 0xFFFFFFE0;
  Label L_special_case, L_block1, L_exit;
  movl(rtmp, FloatConsts_EXP_BIT_MASK);
  movdl(dst, src);
  andl(dst, rtmp);
  sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1);
  movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS);
  subl(rtmp, dst);
  movl(rcx, rtmp);
  movl(dst, MINUS_32);
  testl(rtmp, dst); // shift outside [0, 31] -> special case
  jccb(Assembler::notEqual, L_special_case);
  movdl(dst, src);
  andl(dst, FloatConsts_SIGNIF_BIT_MASK);
  orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1);
  movdl(rtmp, src);
  testl(rtmp, rtmp);
  jccb(Assembler::greaterEqual, L_block1);
  negl(dst);
  bind(L_block1);
  sarl(dst); // shift right by cl (the computed shift amount)
  addl(dst, 0x1);
  sarl(dst, 0x1);
  jmp(L_exit);
  bind(L_special_case);
  convert_f2i(dst, src);
  bind(L_exit);
}
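// Worked example for round_float with src = 2.5f (bits 0x40200000):
//   biased exponent = 128, so shift = (24 - 2 + 127) - 128 = 21, which is
//   inside [0, 31] and the fast path applies;
//   significand | implicit bit = 0x00A00000; the value is positive, so
//   ((0x00A00000 >> 21) + 1) >> 1 = (5 + 1) >> 1 = 3,
// i.e. the half-up rounding java.lang.Math.round specifies. Inputs whose
// shift falls outside [0, 31] (NaN, infinities, very large magnitudes) take
// L_special_case and get plain f2i conversion semantics instead.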
void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the
  // java.lang.Math.round(double) algorithm; refer to it for details.
  const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L;
  const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53;
  const int64_t DoubleConsts_EXP_BIAS = 1023;
  const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL;
  const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L;
  Label L_special_case, L_block1, L_exit;
  mov64(rtmp, DoubleConsts_EXP_BIT_MASK);
  movq(dst, src);
  andq(dst, rtmp);
  sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1);
  mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS);
  subq(rtmp, dst);
  movq(rcx, rtmp);
  mov64(dst, MINUS_64);
  testq(rtmp, dst); // shift outside [0, 63] -> special case
  jccb(Assembler::notEqual, L_special_case);
  movq(dst, src);
  mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK);
  andq(dst, rtmp);
  mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1);
  orq(dst, rtmp);
  movq(rtmp, src);
  testq(rtmp, rtmp);
  jccb(Assembler::greaterEqual, L_block1);
  negq(dst);
  bind(L_block1);
  sarq(dst); // shift right by cl (the computed shift amount)
  addq(dst, 0x1);
  sarq(dst, 0x1);
  jmp(L_exit);
  bind(L_special_case);
  convert_d2l(dst, src);
  bind(L_exit);
}

void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
  Label done;
  cvttsd2siq(dst, src);
  cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movdbl(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
  pop(dst);
  bind(done);
}

void MacroAssembler::cache_wb(Address line)
{
  // 64-bit CPUs always support clflush.
  assert(VM_Version::supports_clflush(), "clflush should be available");
  bool optimized = VM_Version::supports_clflushopt();
  bool no_evict = VM_Version::supports_clwb();

  // Prefer clwb (writeback without evict), otherwise prefer clflushopt
  // (potentially parallel writeback with evict), otherwise fall back on
  // clflush (serial writeback with evict).

  if (optimized) {
    if (no_evict) {
      clwb(line);
    } else {
      clflushopt(line);
    }
  } else {
    // No need for a fence when using CLFLUSH.
    clflush(line);
  }
}

void MacroAssembler::cache_wbsync(bool is_pre)
{
  assert(VM_Version::supports_clflush(), "clflush should be available");
  bool optimized = VM_Version::supports_clflushopt();
  bool no_evict = VM_Version::supports_clwb();

  // Pick the correct implementation.

  if (!is_pre && (optimized || no_evict)) {
    // An sfence is needed for the post flush when using clflushopt or clwb;
    // otherwise no synchronization is needed at all.
    sfence();
  }
}
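// Illustrative sketch (not emitted anywhere here) of how a caller might
// combine the two primitives above to write back an address range:
//   cache_wbsync(true);                     // pre-sync: currently a no-op
//   for (each cache line in the range) {
//     cache_wb(Address(line_base, 0));      // clwb/clflushopt/clflush
//   }
//   cache_wbsync(false);                    // post-sync: sfence if needed
// The post-sync fence matters because clflushopt and clwb are weakly
// ordered, whereas plain clflush is ordered with respect to other
// clflushes and needs no fence.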
#endif // _LP64

Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::zero:         return Assembler::notZero;
    case Assembler::notZero:      return Assembler::zero;
    case Assembler::less:         return Assembler::greaterEqual;
    case Assembler::lessEqual:    return Assembler::greater;
    case Assembler::greater:      return Assembler::lessEqual;
    case Assembler::greaterEqual: return Assembler::less;
    case Assembler::below:        return Assembler::aboveEqual;
    case Assembler::belowEqual:   return Assembler::above;
    case Assembler::above:        return Assembler::belowEqual;
    case Assembler::aboveEqual:   return Assembler::below;
    case Assembler::overflow:     return Assembler::noOverflow;
    case Assembler::noOverflow:   return Assembler::overflow;
    case Assembler::negative:     return Assembler::positive;
    case Assembler::positive:     return Assembler::negative;
    case Assembler::parity:       return Assembler::noParity;
    case Assembler::noParity:     return Assembler::parity;
  }
  ShouldNotReachHere(); return Assembler::overflow;
}

SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, const bool* flag_addr, bool value, Register rscratch) {
  _masm = masm;
  _masm->cmp8(ExternalAddress((address)flag_addr), value, rscratch);
  _masm->jcc(Assembler::equal, _label);
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}

// 32-bit Windows has its own fast-path implementation
// of get_thread
#if !defined(WIN32) || defined(_LP64)

// This is simply a call to Thread::current()
void MacroAssembler::get_thread(Register thread) {
  if (thread != rax) {
    push(rax);
  }
  LP64_ONLY(push(rdi);)
  LP64_ONLY(push(rsi);)
  push(rdx);
  push(rcx);
#ifdef _LP64
  push(r8);
  push(r9);
  push(r10);
  push(r11);
#endif

  MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);

#ifdef _LP64
  pop(r11);
  pop(r10);
  pop(r9);
  pop(r8);
#endif
  pop(rcx);
  pop(rdx);
  LP64_ONLY(pop(rsi);)
  LP64_ONLY(pop(rdi);)
  if (thread != rax) {
    mov(thread, rax);
    pop(rax);
  }
}


#endif // !WIN32 || _LP64

void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
  Label L_stack_ok;
  if (bias == 0) {
    testptr(sp, 2 * wordSize - 1);
  } else {
    // lea(tmp, Address(sp, bias));
    mov(tmp, sp);
    addptr(tmp, bias);
    testptr(tmp, 2 * wordSize - 1);
  }
  jcc(Assembler::equal, L_stack_ok);
  block_comment(msg);
  stop(msg);
  bind(L_stack_ok);
}

// Implements lightweight-locking.
//
// obj: the object to be locked
// reg_rax: rax
// thread: the thread which attempts to lock obj
// tmp: a temporary register
void MacroAssembler::lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
  assert(reg_rax == rax, "");
  assert_different_registers(obj, reg_rax, thread, tmp);

  Label push;
  const Register top = tmp;

  // Preload the markWord. It is important that this is the first
  // instruction emitted as it is part of C1's null check semantics.
  movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));

  // Load top.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));

  // Check if the lock-stack is full.
  cmpl(top, LockStack::end_offset());
  jcc(Assembler::greaterEqual, slow);

  // Check for recursion.
  cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
  jcc(Assembler::equal, push);

  // Check header for monitor (0b10).
  testptr(reg_rax, markWord::monitor_value);
  jcc(Assembler::notZero, slow);

  // Try to lock. Transition lock bits 0b01 => 0b00
  movptr(tmp, reg_rax);
  andptr(tmp, ~(int32_t)markWord::unlocked_value);
  orptr(reg_rax, markWord::unlocked_value);
  if (EnableValhalla) {
    // Mask inline_type bit such that we go to the slow path if object is an inline type
    andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place));
  }
  lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  jcc(Assembler::notEqual, slow);

  // Restore top, CAS clobbers register.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));

  bind(push);
  // After successful lock, push object on lock-stack.
  movptr(Address(thread, top), obj);
  incrementl(top, oopSize);
  movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
}
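// Mark word lock bits used by the fast paths above and below:
//   0b01 - unlocked
//   0b00 - fast-locked (ownership recorded on the thread's lock-stack)
//   0b10 - inflated (an ObjectMonitor is installed)
// lightweight_lock CASes 0b01 => 0b00 and pushes obj on the lock-stack;
// lightweight_unlock pops it and CASes 0b00 => 0b01. Anything else
// (full lock-stack, inflated header, contended CAS) goes to the slow path.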
// Implements lightweight-unlocking.
//
// obj: the object to be unlocked
// reg_rax: rax
// thread: the thread
// tmp: a temporary register
//
// x86_32 Note: reg_rax and thread may alias each other due to limited register
// availability.
void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
  assert(reg_rax == rax, "");
  assert_different_registers(obj, reg_rax, tmp);
  LP64_ONLY(assert_different_registers(obj, reg_rax, thread, tmp);)

  Label unlocked, push_and_slow;
  const Register top = tmp;

  // Check if obj is top of lock-stack.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
  cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
  jcc(Assembler::notEqual, slow);

  // Pop lock-stack.
  DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
  subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);

  // Check if recursive.
  cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
  jcc(Assembler::equal, unlocked);

  // Not recursive. Check header for monitor (0b10).
  movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
  testptr(reg_rax, markWord::monitor_value);
  jcc(Assembler::notZero, push_and_slow);

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  testptr(reg_rax, markWord::unlocked_value);
  jcc(Assembler::zero, not_unlocked);
  stop("lightweight_unlock already unlocked");
  bind(not_unlocked);
#endif

  // Try to unlock. Transition lock bits 0b00 => 0b01
  movptr(tmp, reg_rax);
  orptr(tmp, markWord::unlocked_value);
  lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  jcc(Assembler::equal, unlocked);

  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
  if (thread == reg_rax) {
    // On x86_32 we may lose the thread.
    get_thread(thread);
  }
#ifdef ASSERT
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
  movptr(Address(thread, top), obj);
#endif
  addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
  jmp(slow);

  bind(unlocked);
}