/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "ci/ciInlineKlass.hpp"
#include "crc32c.h"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature_cc.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER2
#include "opto/output.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

static const Assembler::Condition reverse[] = {
    Assembler::noOverflow   /* overflow   = 0x0 */ ,
    Assembler::overflow     /* noOverflow = 0x1 */ ,
    Assembler::aboveEqual   /* carrySet   = 0x2, below      = 0x2 */ ,
    Assembler::below        /* aboveEqual = 0x3, carryClear = 0x3 */ ,
    Assembler::notZero      /* zero       = 0x4, equal      = 0x4 */ ,
    Assembler::zero         /* notZero    = 0x5, notEqual   = 0x5 */ ,
    Assembler::above        /* belowEqual = 0x6 */ ,
    Assembler::belowEqual   /* above      = 0x7 */ ,
    Assembler::positive     /* negative   = 0x8 */ ,
    Assembler::negative     /* positive   = 0x9 */ ,
    Assembler::noParity     /* parity     = 0xa */ ,
    Assembler::parity       /* noParity     = 0xb */ ,
    Assembler::greaterEqual /* less         = 0xc */ ,
    Assembler::less         /* greaterEqual = 0xd */ ,
    Assembler::greater      /* lessEqual    = 0xe */ ,
    Assembler::lessEqual    /* greater      = 0xf */
};


// Implementation of MacroAssembler

// First all the versions that have distinct versions depending on 32/64 bit
// Unless the difference is trivial (1 line or so).

#ifndef _LP64

// 32bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}

Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  assert(rscratch == noreg, "");
  return Address::make_array(adr);
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments) {
  call(RuntimeAddress(entry_point));
  increment(rsp, number_of_arguments * wordSize);
}

void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Address src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Register src1, jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::extend_sign(Register hi, Register lo) {
  // According to Intel Doc. AP-526, "Integer Divide", p.18.
  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
    cdql();
  } else {
    movl(hi, lo);
    sarl(hi, 31);
  }
}

void MacroAssembler::jC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::parity, L);
}

void MacroAssembler::jnC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::noParity, L);
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  assert(rscratch == noreg, "not needed");
  jmp(as_Address(entry, noreg));
}

// Note: y_lo will be destroyed
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  // Long compare for Java (semantics as described in JVM spec.)
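  // The result is produced in x_hi: 1 if x > y, 0 if x == y, -1 if x < y
  // (the lcmp bytecode contract). e.g. x = 0x1_00000000, y = 0x0_FFFFFFFF:
  // the signed hi-word compare already decides (1 > 0), so the unsigned
  // lo-word compare below is skipped and the result is 1.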
  Label high, low, done;

  cmpl(x_hi, y_hi);
  jcc(Assembler::less, low);
  jcc(Assembler::greater, high);
  // x_hi is the return register
  xorl(x_hi, x_hi);
  cmpl(x_lo, y_lo);
  jcc(Assembler::below, low);
  jcc(Assembler::equal, done);

  bind(high);
  xorl(x_hi, x_hi);
  increment(x_hi);
  jmp(done);

  bind(low);
  xorl(x_hi, x_hi);
  decrementl(x_hi);

  bind(done);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  assert(rscratch == noreg, "not needed");

  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t)adr.target(), adr.rspec());
}

void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  //
  // rsp ---> [  ??  ] \               \
  //            ....    | y_rsp_offset  |
  //          [ y_lo ]  /  (in bytes)   | x_rsp_offset
  //          [ y_hi ]                  | (in bytes)
  //            ....                    |
  //          [ x_lo ]                 /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);
  orl(rbx, rcx);                // rbx = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);  // if rbx = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);                   // x_hi * y_lo
  movl(rbx, rax);               // save lo(x_hi * y_lo) in rbx
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);                    // x_lo * y_hi
  addl(rbx, rax);               // add lo(x_lo * y_hi) to rbx
  // 3rd step
  bind(quick);                  // note: rbx = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);                   // x_lo * y_lo
  addl(rdx, rbx);               // correct hi(x_lo * y_lo)
}

void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);
  negl(hi);
}

void MacroAssembler::lshl(Register hi, Register lo) {
  // Java shift left long support (semantics as described in JVM spec., p.305)
  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  // shift value is in rcx !
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;       // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                   // if (s < n)
  jcc(Assembler::less, L);      // else (s >= n)
  movl(hi, lo);                 // x := x << n
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
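  // e.g. s = 40: the branch above leaves hi = lo, lo = 0; the shld/shl below
  // then shift by s mod 32 = 8, giving hi = original lo << 8, lo = 0,
  // i.e. exactly x << 40.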
  bind(L);                      // s (mod n) < n
  shldl(hi, lo);                // x := x << s
  shll(lo);
}


void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;       // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                   // if (s < n)
  jcc(Assembler::less, L);      // else (s >= n)
  movl(lo, hi);                 // x := x >> n
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                      // s (mod n) < n
  shrdl(lo, hi);                // x := x >> s
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  movl(as_Address(dst, noreg), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src, noreg));
}

void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  movl(dst, src);
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax,
                             int eip, char* msg) {
  // In order to get locks to work, we need to fake a in_VM state
  JavaThread* thread = JavaThread::current();
  JavaThreadState saved_state = thread->thread_state();
  thread->set_thread_state(_thread_in_vm);
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
  if ((WizardMode || Verbose) && PrintMiscellaneous) {
    tty->cr();
    findpc(eip);
    tty->cr();
  }
#endif
#define PRINT_REG(rax) \
  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
  PRINT_REG(rax);
  PRINT_REG(rbx);
  PRINT_REG(rcx);
  PRINT_REG(rdx);
  PRINT_REG(rdi);
  PRINT_REG(rsi);
  PRINT_REG(rbp);
  PRINT_REG(rsp);
#undef PRINT_REG
  // Print some words near top of stack.
  int* dump_sp = (int*) rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 16; row++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 8; col++) {
      tty->print(" 0x%08x", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)eip-64, (address)eip);
  tty->print_cr("--------");
  Disassembler::decode((address)eip, (address)eip+32);
}

void MacroAssembler::stop(const char* msg) {
  // push address of message
  ExternalAddress message((address)msg);
  pushptr(message.addr(), noreg);
  { Label L; call(L, relocInfo::none); bind(L); } // push eip
  pusha();                                        // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  // push address of message
  ExternalAddress message((address)msg);
  pushptr(message.addr(), noreg);

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize); // discard argument
  pop_CPU_state();
}

void MacroAssembler::print_state() {
  { Label L; call(L, relocInfo::none); bind(L); } // push eip
  pusha();                                        // push registers

  push_CPU_state();
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
  pop_CPU_state();

  popa();
  addl(rsp, wordSize);
}

#else // _LP64

// 64 bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
rval"); 496 assert(reachable(adr), "must be"); 497 return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc()); 498 499 } 500 501 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) { 502 AddressLiteral base = adr.base(); 503 lea(rscratch, base); 504 Address index = adr.index(); 505 assert(index._disp == 0, "must not have disp"); // maybe it can? 506 Address array(rscratch, index._index, index._scale, index._disp); 507 return array; 508 } 509 510 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) { 511 Label L, E; 512 513 #ifdef _WIN64 514 // Windows always allocates space for it's register args 515 assert(num_args <= 4, "only register arguments supported"); 516 subq(rsp, frame::arg_reg_save_area_bytes); 517 #endif 518 519 // Align stack if necessary 520 testl(rsp, 15); 521 jcc(Assembler::zero, L); 522 523 subq(rsp, 8); 524 call(RuntimeAddress(entry_point)); 525 addq(rsp, 8); 526 jmp(E); 527 528 bind(L); 529 call(RuntimeAddress(entry_point)); 530 531 bind(E); 532 533 #ifdef _WIN64 534 // restore stack pointer 535 addq(rsp, frame::arg_reg_save_area_bytes); 536 #endif 537 538 } 539 540 void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) { 541 assert(!src2.is_lval(), "should use cmpptr"); 542 assert(rscratch != noreg || always_reachable(src2), "missing"); 543 544 if (reachable(src2)) { 545 cmpq(src1, as_Address(src2)); 546 } else { 547 lea(rscratch, src2); 548 Assembler::cmpq(src1, Address(rscratch, 0)); 549 } 550 } 551 552 int MacroAssembler::corrected_idivq(Register reg) { 553 // Full implementation of Java ldiv and lrem; checks for special 554 // case as described in JVM spec., p.243 & p.271. The function 555 // returns the (pc) offset of the idivl instruction - may be needed 556 // for implicit exceptions. 
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor   (may not be eax/edx)   -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}

void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}

void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}

void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    incrementq(as_Address(dst));
  } else {
    lea(rscratch, dst);
    incrementq(Address(rscratch, 0));
  }
}

void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}

void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  lea(rscratch, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch;
  jmp(dispatch);
}

void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  lea(rscratch, adr);
  movptr(dst, rscratch);
}

void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
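  // (LEAVE, opcode 0xC9, is architecturally equivalent to the two-instruction
  // 'mov rsp, rbp; pop rbp' sequence used in the 32-bit version above.)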
  emit_int8((unsigned char)0xC9); // LEAVE
}

void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(dst, src);
      movq(dst, Address(dst, 0));
    }
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  movq(as_Address(dst, rscratch), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src, dst /*rscratch*/));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  if (is_simm32(src)) {
    movptr(dst, checked_cast<int32_t>(src));
  } else {
    mov64(rscratch, src);
    movq(dst, rscratch);
  }
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  movoop(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  mov_metadata(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  lea(rscratch, src);
  if (src.is_lval()) {
    push(rscratch);
  } else {
    pushq(Address(rscratch, 0));
  }
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  reset_last_Java_frame(r15_thread, clear_fp);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register rscratch) {
  set_last_Java_frame(r15_thread, last_java_sp, last_java_fp, last_java_pc, rscratch);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::stop(const char* msg) {
  if (ShowMessageBoxOnError) {
    address rip = pc();
    pusha(); // get regs on stack
    lea(c_rarg1, InternalAddress(rip));
    movq(c_rarg2, rsp); // pass pointer to regs array
  }
  lea(c_rarg0, ExternalAddress((address) msg));
  andq(rsp, -16); // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);   // align stack as required by push_CPU_state and call
  push_CPU_state(); // keeps alignment at 16 bytes

  lea(c_rarg0, ExternalAddress((address) msg));
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();          // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);   // align stack as required by push_CPU_state and call
  push_CPU_state(); // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake a in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  PRINT_REG(rsp, (intptr_t)(&regs[16]));
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near the top of the stack.
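  // (regs[0..15] hold the GPRs saved by pusha(), regs[0] = r15 up through
  // regs[15] = rax, so &regs[16] is the stack top from before the pusha().)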
  int64_t* rsp = &regs[16];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or adjacent stack slots.

  if (src.is_single_phys_reg()) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
             src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}

// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or adjacent stack slots.

  if (src.is_single_phys_reg()) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if (src.first() != dst.first()) {
        movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}


// A float arg may have to do float reg <-> int reg conversion
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMRegPair is either
  // entirely one physical register or adjacent stack slots.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if (src.first() != dst.first()) {
      movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}

// On 64 bit we will store integer like items to the stack as
// 64 bits items (x86_32/64 abi) even though java would only store
// 32bits for a parameter. On 32bit it will simply be 32 bits
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movq(rax, Address(rbp, reg2offset_in(src.first())));
      movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
void MacroAssembler::object_move(OopMap* map,
                                 int oop_handle_offset,
                                 int framesize_in_slots,
                                 VMRegPair src,
                                 VMRegPair dst,
                                 bool is_receiver,
                                 int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if oop is null; if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
    lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a null
    cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmpptr(rOop, NULL_WORD);
    lea(rHandle, Address(rsp, offset));
    // conditionally move a null from the handle area where it was just stored
    cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it otherwise it is already in correct reg.
  if (dst.first()->is_stack()) {
    movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}

#endif // _LP64

// Now versions that are common to 32/64 bit

void MacroAssembler::addptr(Register dst, int32_t imm32) {
  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
}

void MacroAssembler::addptr(Register dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addptr(Address dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    addss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    addss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addpd(dst, Address(rscratch, 0));
  }
}

// See 8273459. Function for ensuring 64-byte alignment, intended for stubs only.
// Stub code is generated once and never copied.
// NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
void MacroAssembler::align64() {
  align(64, (unsigned long long) pc());
}

void MacroAssembler::align32() {
  align(32, (unsigned long long) pc());
}

void MacroAssembler::align(int modulus) {
  // 8273459: Ensure alignment is possible with current segment alignment
  assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
  align(modulus, offset());
}

void MacroAssembler::align(int modulus, int target) {
  if (target % modulus != 0) {
    nop(modulus - (target % modulus));
  }
}

void MacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}

void MacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void MacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}

void MacroAssembler::pop_d(XMMRegister r) {
  movdbl(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
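  // (Illustrative: AND-ing a double with a 16-byte-aligned constant of
  // 0x7FFFFFFFFFFFFFFF words clears the sign bit, the usual fabs idiom.)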
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andpd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andps(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
}

#ifdef _LP64
void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    andq(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    andq(dst, Address(rscratch, 0));
  }
}
#endif

void MacroAssembler::atomic_incl(Address counter_addr) {
  lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incl(Address(rscratch, 0));
  }
}

#ifdef _LP64
void MacroAssembler::atomic_incq(Address counter_addr) {
  lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incq(Address(rscratch, 0));
  }
}
#endif

// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-(int)os::vm_page_size())), size);
  subptr(tmp, (int)os::vm_page_size());
  subl(size, (int)os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down including all pages in the shadow zone.
  for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
    // this could be any sized move but this can be a debugging crumb
    // so the bigger the better.
    movptr(Address(tmp, (-i*(int)os::vm_page_size())), size);
  }
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(get_thread(rsi);)

  cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
  jcc(Assembler::below, no_reserved_zone_enabling);

  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
  jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);
  setb(Assembler::notZero, x);
}

// Wouldn't need if AddressLiteral version had new name
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}

void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}

void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
  assert(rscratch != noreg || always_reachable(entry), "missing");

  if (reachable(entry)) {
    Assembler::call_literal(entry.target(), entry.rspec());
  } else {
    lea(rscratch, entry);
    Assembler::call(rscratch);
  }
}

void MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
#ifdef _LP64
  // Needs full 64-bit immediate for later patching.
  mov64(rax, (intptr_t)Universe::non_oop_word());
#else
  movptr(rax, (intptr_t)Universe::non_oop_word());
#endif
  call(AddressLiteral(entry, rh));
}

void MacroAssembler::emit_static_call_stub() {
  // Static stub relocation also tags the Method* in the code-stream.
  mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time.
  // This is recognized as unresolved by relocs/nativeinst/ic code.
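  // (The jump below initially targets its own pc; the real destination is
  // patched in when the static call is resolved.)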
  jump(RuntimeAddress(pc()));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));

  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   int number_of_arguments,
                                   bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   bool check_exceptions) {
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   bool check_exceptions) {

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   Register arg_3,
                                   bool check_exceptions) {
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifdef _LP64
    java_thread = r15_thread;
#else
    java_thread = rdi;
    get_thread(java_thread);
#endif // LP64
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // r12 is the heapbase.
  LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  NOT_LP64(push(java_thread); number_of_arguments++);
  LP64_ONLY(mov(c_rarg0, r15_thread));

  // set last Java frame before call
  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(java_thread, last_java_sp, rbp, nullptr, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

  // restore the thread (cannot use the pushed argument since arguments
  // may be overwritten by C code generated by an optimizing compiler);
  // however can use the register value directly if it is callee saved.
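  // (On 64-bit java_thread is always r15, asserted above, which is callee
  // saved, so the LP64_ONLY(true ||) below makes this branch unconditional.)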
  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
    // rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
    guarantee(java_thread != rax, "change this code");
    push(rax);
    { Label L;
      get_thread(rax);
      cmpptr(java_thread, rax);
      jcc(Assembler::equal, L);
      STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
      bind(L);
    }
    pop(rax);
#endif
  } else {
    get_thread(java_thread);
  }
  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(java_thread, true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
#ifndef _LP64
    jump_cc(Assembler::notEqual,
            RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
#endif // LP64
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {

  // Calculating the value for last_Java_sp is somewhat subtle. call_VM
  // does an intermediate call which places a return address on the stack
  // just under the stack pointer as the user finished with it. This allows
  // us to retrieve last_Java_pc from last_Java_sp[-1].
  // On 32bit we then have to push additional args on the stack to accomplish
  // the actual requested call. On 64bit call_VM only can use register args
  // so the only extra space is the return address that call_VM created.
  // This hopefully explains the calculations here.

#ifdef _LP64
  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));
#else
  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
#endif // LP64

  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
}

// Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter.
void MacroAssembler::call_VM_leaf0(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 0);
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 4); // four register arguments are passed above
}

void MacroAssembler::super_call_VM_leaf(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 0); // no arguments are passed
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 4);
}

void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
}

void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
}

void MacroAssembler::check_and_handle_popframe(Register java_thread) {
}

void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src1), "missing");

  if (reachable(src1)) {
    cmpl(as_Address(src1), imm);
  } else {
    lea(rscratch, src1);
    cmpl(Address(rscratch, 0), imm);
  }
}

void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) {
  assert(!src2.is_lval(), "use cmpptr");
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (reachable(src2)) {
    cmpl(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    cmpl(src1, Address(rscratch, 0));
  }
}

void MacroAssembler::cmp32(Register src1, int32_t imm) {
  Assembler::cmpl(src1, imm);
}

void MacroAssembler::cmp32(Register src1, Address src2) {
  Assembler::cmpl(src1, src2);
}

void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomisd(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}

void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomiss(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}
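// Illustrative note (a sketch, not part of the build): these helpers produce
// the Java fcmp<l|g>/dcmp<l|g> result mapping; with unordered_is_less == true:
//
//   cmp(1.0, 2.0) -> -1   (CF set: taken at the 'below' branch)
//   cmp(2.0, 2.0) ->  0   (ZF set: taken at the 'equal' branch)
//   cmp(3.0, 2.0) -> +1   (falls through to increment(dst))
//   cmp(NaN, x)   -> -1   (PF set: ucomisd/ucomiss signal unordered via parity)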
void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src1), "missing");

  if (reachable(src1)) {
    cmpb(as_Address(src1), imm);
  } else {
    lea(rscratch, src1);
    cmpb(Address(rscratch, 0), imm);
  }
}

void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) {
#ifdef _LP64
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (src2.is_lval()) {
    movptr(rscratch, src2);
    Assembler::cmpq(src1, rscratch);
  } else if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    Assembler::cmpq(src1, Address(rscratch, 0));
  }
#else
  assert(rscratch == noreg, "not needed");
  if (src2.is_lval()) {
    cmp_literal32(src1, (int32_t)src2.target(), src2.rspec());
  } else {
    cmpl(src1, as_Address(src2));
  }
#endif // _LP64
}

void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) {
  assert(src2.is_lval(), "not a mem-mem compare");
#ifdef _LP64
  // moves src2's literal address
  movptr(rscratch, src2);
  Assembler::cmpq(src1, rscratch);
#else
  assert(rscratch == noreg, "not needed");
  cmp_literal32(src1, (int32_t)src2.target(), src2.rspec());
#endif // _LP64
}

void MacroAssembler::cmpoop(Register src1, Register src2) {
  cmpptr(src1, src2);
}

void MacroAssembler::cmpoop(Register src1, Address src2) {
  cmpptr(src1, src2);
}

#ifdef _LP64
void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) {
  movoop(rscratch, src2);
  cmpptr(src1, rscratch);
}
#endif

void MacroAssembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  if ((UseAVX > 0) && (dst != src)) {
    xorpd(dst, dst);
  }
  Assembler::cvtss2sd(dst, src);
}

void MacroAssembler::cvtss2sd(XMMRegister dst, Address src) {
  if (UseAVX > 0) {
    xorpd(dst, dst);
  }
  Assembler::cvtss2sd(dst, src);
}

void MacroAssembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  if ((UseAVX > 0) && (dst != src)) {
    xorps(dst, dst);
  }
  Assembler::cvtsd2ss(dst, src);
}

void MacroAssembler::cvtsd2ss(XMMRegister dst, Address src) {
  if (UseAVX > 0) {
    xorps(dst, dst);
  }
  Assembler::cvtsd2ss(dst, src);
}

void MacroAssembler::cvtsi2sdl(XMMRegister dst, Register src) {
  if (UseAVX > 0) {
    xorpd(dst, dst);
  }
  Assembler::cvtsi2sdl(dst, src);
}

void MacroAssembler::cvtsi2sdl(XMMRegister dst, Address src) {
  if (UseAVX > 0) {
    xorpd(dst, dst);
  }
  Assembler::cvtsi2sdl(dst, src);
}

void MacroAssembler::cvtsi2ssl(XMMRegister dst, Register src) {
  if (UseAVX > 0) {
    xorps(dst, dst);
  }
  Assembler::cvtsi2ssl(dst, src);
}

void MacroAssembler::cvtsi2ssl(XMMRegister dst, Address src) {
  if (UseAVX > 0) {
    xorps(dst, dst);
  }
  Assembler::cvtsi2ssl(dst, src);
}

#ifdef _LP64
void MacroAssembler::cvtsi2sdq(XMMRegister dst, Register src) {
  if (UseAVX > 0) {
    xorpd(dst, dst);
  }
  Assembler::cvtsi2sdq(dst, src);
}

void MacroAssembler::cvtsi2sdq(XMMRegister dst, Address src) {
  if (UseAVX > 0) {
    xorpd(dst, dst);
  }
  Assembler::cvtsi2sdq(dst, src);
}

void MacroAssembler::cvtsi2ssq(XMMRegister dst, Register src) {
  if (UseAVX > 0) {
    xorps(dst, dst);
  }
  Assembler::cvtsi2ssq(dst, src);
}

void MacroAssembler::cvtsi2ssq(XMMRegister dst, Address src) {
  if (UseAVX > 0) {
    xorps(dst, dst);
  }
  Assembler::cvtsi2ssq(dst, src);
}
#endif // _LP64
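// Illustrative note (a sketch, not part of the build): the xorps/xorpd before
// the scalar converts above break the false output dependency cvtsi2ss and
// friends have on the untouched upper bits of dst, e.g.
//
//   xorps(xmm0, xmm0);               // dst now has no pending producers
//   Assembler::cvtsi2ssl(xmm0, rax); // the merge into xmm0 no longer stalls
//
// which matters on out-of-order cores when dst was last written long ago.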
void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(adr), "missing");

  if (reachable(adr)) {
    lock();
    cmpxchgptr(reg, as_Address(adr));
  } else {
    lea(rscratch, adr);
    lock();
    cmpxchgptr(reg, Address(rscratch, 0));
  }
}

void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
  LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
}

void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::comisd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::comisd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::comiss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::comiss(dst, Address(rscratch, 0));
  }
}


void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  Condition negated_cond = negate_condition(cond);
  Label L;
  jcc(negated_cond, L);
  pushf(); // Preserve flags
  atomic_incl(counter_addr, rscratch);
  popf();
  bind(L);
}

int MacroAssembler::corrected_idivl(Register reg) {
  // Full implementation of Java idiv and irem; checks for
  // special case as described in JVM spec., p.243 & p.271.
  // The function returns the (pc) offset of the idivl
  // instruction - may be needed for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_int
  //         reg: divisor   (may not be rax/rdx)   -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_int
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  const int min_int = 0x80000000;
  Label normal_case, special_case;

  // check for special case
  cmpl(rax, min_int);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
  cmpl(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdql();
  int idivl_offset = offset();
  idivl(reg);

  // normal and special case exit
  bind(special_case);

  return idivl_offset;
}



void MacroAssembler::decrementl(Register reg, int value) {
  if (value == min_jint)       { subl(reg, value);        return; }
  if (value <  0)              { incrementl(reg, -value); return; }
  if (value == 0)              {                          return; }
  if (value == 1 && UseIncDec) { decl(reg);               return; }
  /* else */                   { subl(reg, value);        return; }
}

void MacroAssembler::decrementl(Address dst, int value) {
  if (value == min_jint)       { subl(dst, value);        return; }
  if (value <  0)              { incrementl(dst, -value); return; }
  if (value == 0)              {                          return; }
  if (value == 1 && UseIncDec) { decl(dst);               return; }
  /* else */                   { subl(dst, value);        return; }
}

void MacroAssembler::division_with_shift(Register reg, int shift_value) {
  assert(shift_value > 0, "illegal shift value");
  Label _is_positive;
  testl(reg, reg);
  jcc(Assembler::positive, _is_positive);
  int offset = (1 << shift_value) - 1;

  if (offset == 1) {
    incrementl(reg);
  } else {
    addl(reg, offset);
  }

  bind(_is_positive);
  sarl(reg, shift_value);
}
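// Illustrative sketch (not part of the build): a plain arithmetic shift
// rounds toward negative infinity, while Java division rounds toward zero,
// so negative dividends get the (2^shift - 1) bias added above:
//
//   -7 >> 2        == -2   (wrong for Java's -7 / 4)
//   (-7 + 3) >> 2  == -1   (matches Java's -7 / 4)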
void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::divsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::divsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::divss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::divss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::enter() {
  push(rbp);
  mov(rbp, rsp);
}

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  emit_int8((uint8_t)0x0f);
  emit_int8((uint8_t)0x1f);
  emit_int8((uint8_t)0x84);
  emit_int8((uint8_t)0x00);
  emit_int32(0x00);
}

// A 5 byte nop that is safe for patching (see patch_verified_entry)
void MacroAssembler::fat_nop() {
  if (UseAddressNop) {
    addr_nop_5();
  } else {
    emit_int8((uint8_t)0x26); // es:
    emit_int8((uint8_t)0x2e); // cs:
    emit_int8((uint8_t)0x64); // fs:
    emit_int8((uint8_t)0x65); // gs:
    emit_int8((uint8_t)0x90);
  }
}
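// Illustrative note (a sketch, not part of the build): both helpers emit
// fixed-size nops so other code can find or patch them reliably:
//
//   post_call_nop:  0F 1F 84 00 00 00 00 00   an 8-byte nopl 0x0(%rax,%rax,1);
//                   its modrm/sib byte plus 32-bit displacement give the
//                   relocation a fixed-layout marker right after a call;
//   fat_nop:        26 2E 64 65 90            segment prefixes pad a one-byte
//                   nop to the 5 bytes patch_verified_entry can overwrite.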
#ifndef _LP64
void MacroAssembler::fcmp(Register tmp) {
  fcmp(tmp, 1, true, true);
}

void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
  assert(!pop_right || pop_left, "usage error");
  if (VM_Version::supports_cmov()) {
    assert(tmp == noreg, "unneeded temp");
    if (pop_left) {
      fucomip(index);
    } else {
      fucomi(index);
    }
    if (pop_right) {
      fpop();
    }
  } else {
    assert(tmp != noreg, "need temp");
    if (pop_left) {
      if (pop_right) {
        fcompp();
      } else {
        fcomp(index);
      }
    } else {
      fcom(index);
    }
    // convert FPU condition into eflags condition via rax
    save_rax(tmp);
    fwait(); fnstsw_ax();
    sahf();
    restore_rax(tmp);
  }
  // condition codes set as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
}

void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
  fcmp2int(dst, unordered_is_less, 1, true, true);
}

void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
  fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}

void MacroAssembler::fld_d(AddressLiteral src) {
  fld_d(as_Address(src));
}

void MacroAssembler::fld_s(AddressLiteral src) {
  fld_s(as_Address(src));
}

void MacroAssembler::fldcw(AddressLiteral src) {
  fldcw(as_Address(src));
}

void MacroAssembler::fpop() {
  ffree();
  fincstp();
}

void MacroAssembler::fremr(Register tmp) {
  save_rax(tmp);
  { Label L;
    bind(L);
    fprem();
    fwait(); fnstsw_ax();
    sahf();
    jcc(Assembler::parity, L);
  }
  restore_rax(tmp);
  // Result is in ST0.
  // Note: fxch & fpop to get rid of ST1
  // (otherwise FPU stack could overflow eventually)
  fxch(1);
  fpop();
}

void MacroAssembler::empty_FPU_stack() {
  if (VM_Version::supports_mmx()) {
    emms();
  } else {
    for (int i = 8; i-- > 0; ) ffree(i);
  }
}
#endif // !LP64

void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");
  if (reachable(src)) {
    Assembler::mulpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::mulpd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::load_float(Address src) {
#ifdef _LP64
  movflt(xmm0, src);
#else
  if (UseSSE >= 1) {
    movflt(xmm0, src);
  } else {
    fld_s(src);
  }
#endif // LP64
}

void MacroAssembler::store_float(Address dst) {
#ifdef _LP64
  movflt(dst, xmm0);
#else
  if (UseSSE >= 1) {
    movflt(dst, xmm0);
  } else {
    fstp_s(dst);
  }
#endif // LP64
}

void MacroAssembler::load_double(Address src) {
#ifdef _LP64
  movdbl(xmm0, src);
#else
  if (UseSSE >= 2) {
    movdbl(xmm0, src);
  } else {
    fld_d(src);
  }
#endif // LP64
}

void MacroAssembler::store_double(Address dst) {
#ifdef _LP64
  movdbl(dst, xmm0);
#else
  if (UseSSE >= 2) {
    movdbl(dst, xmm0);
  } else {
    fstp_d(dst);
  }
#endif // LP64
}

// dst = c = a * b + c
void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
  Assembler::vfmadd231sd(c, a, b);
  if (dst != c) {
    movdbl(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
  Assembler::vfmadd231ss(c, a, b);
  if (dst != c) {
    movflt(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
  Assembler::vfmadd231pd(c, a, b, vector_len);
  if (dst != c) {
    vmovdqu(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
  Assembler::vfmadd231ps(c, a, b, vector_len);
  if (dst != c) {
    vmovdqu(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
  Assembler::vfmadd231pd(c, a, b, vector_len);
  if (dst != c) {
    vmovdqu(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
  Assembler::vfmadd231ps(c, a, b, vector_len);
  if (dst != c) {
    vmovdqu(dst, c);
  }
}
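// Illustrative sketch (not part of the build): vfmadd231 computes
// c = a * b + c in a single rounding step, so the helpers above only need a
// move when the requested destination differs from c:
//
//   fmad(xmm0, xmm1, xmm2, xmm0);  // xmm0 = xmm1 * xmm2 + xmm0, no move
//   fmad(xmm3, xmm1, xmm2, xmm0);  // same fma into xmm0, then movdbl xmm3, xmm0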
void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    incrementl(as_Address(dst));
  } else {
    lea(rscratch, dst);
    incrementl(Address(rscratch, 0));
  }
}

void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) {
  incrementl(as_Address(dst, rscratch));
}

void MacroAssembler::incrementl(Register reg, int value) {
  if (value == min_jint)       { addl(reg, value);        return; }
  if (value <  0)              { decrementl(reg, -value); return; }
  if (value == 0)              {                          return; }
  if (value == 1 && UseIncDec) { incl(reg);               return; }
  /* else */                   { addl(reg, value);        return; }
}

void MacroAssembler::incrementl(Address dst, int value) {
  if (value == min_jint)       { addl(dst, value);        return; }
  if (value <  0)              { decrementl(dst, -value); return; }
  if (value == 0)              {                          return; }
  if (value == 1 && UseIncDec) { incl(dst);               return; }
  /* else */                   { addl(dst, value);        return; }
}

void MacroAssembler::jump(AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    jmp_literal(dst.target(), dst.rspec());
  } else {
    lea(rscratch, dst);
    jmp(rscratch);
  }
}

void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    InstructionMark im(this);
    relocate(dst.reloc());
    const int short_size = 2;
    const int long_size = 6;
    int offs = (intptr_t)dst.target() - ((intptr_t)pc());
    if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_int8(0x70 | cc);
      emit_int8((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      emit_int8(0x0F);
      emit_int8((unsigned char)(0x80 | cc));
      emit_int32(offs - long_size);
    }
  } else {
#ifdef ASSERT
    warning("reversing conditional branch");
#endif /* ASSERT */
    Label skip;
    jccb(reverse[cc], skip);
    lea(rscratch, dst);
    Assembler::jmp(rscratch);
    bind(skip);
  }
}
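// Illustrative note (a sketch, not part of the build): for a reachable target
// jump_cc picks the branch encoding by displacement,
//
//   70..7F cb      jcc rel8   -- when the offset fits in a signed byte
//   0F 80..8F cd   jcc rel32  -- otherwise, or whenever relocation is involved
//
// and an unreachable target is handled by reversing the condition (via the
// reverse[] table near the top of the file) around an indirect jmp through
// rscratch.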
void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::ldmxcsr(as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::ldmxcsr(Address(rscratch, 0));
  }
}

int MacroAssembler::load_signed_byte(Register dst, Address src) {
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    off = offset();
    movsbl(dst, src); // movsxb
  } else {
    off = load_unsigned_byte(dst, src);
    shll(dst, 24);
    sarl(dst, 24);
  }
  return off;
}

// Note: load_signed_short used to be called load_signed_word.
// Although the 'w' in x86 opcodes refers to the term "word" in the assembler
// manual, which means 16 bits, that usage is found nowhere in HotSpot code.
// The term "word" in HotSpot means a 32- or 64-bit machine word.
int MacroAssembler::load_signed_short(Register dst, Address src) {
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    // This is dubious to me since it seems safe to do a signed 16 => 64 bit
    // version but this is what 64bit has always done. This seems to imply
    // that users are only using 32bits worth.
    off = offset();
    movswl(dst, src); // movsxw
  } else {
    off = load_unsigned_short(dst, src);
    shll(dst, 16);
    sarl(dst, 16);
  }
  return off;
}

int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
  // and "3.9 Partial Register Penalties", p. 22.
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzbl(dst, src); // movzxb
  } else {
    xorl(dst, dst);
    off = offset();
    movb(dst, src);
  }
  return off;
}

// Note: load_unsigned_short used to be called load_unsigned_word.
int MacroAssembler::load_unsigned_short(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
  // and "3.9 Partial Register Penalties", p. 22.
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzwl(dst, src); // movzxw
  } else {
    xorl(dst, dst);
    off = offset();
    movw(dst, src);
  }
  return off;
}

void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
  switch (size_in_bytes) {
#ifndef _LP64
  case 8:
    assert(dst2 != noreg, "second dest register required");
    movl(dst,  src);
    movl(dst2, src.plus_disp(BytesPerInt));
    break;
#else
  case 8: movq(dst, src); break;
#endif
  case 4: movl(dst, src); break;
  case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
  case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
  default: ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
  switch (size_in_bytes) {
#ifndef _LP64
  case 8:
    assert(src2 != noreg, "second source register required");
    movl(dst,                        src);
    movl(dst.plus_disp(BytesPerInt), src2);
    break;
#else
  case 8: movq(dst, src); break;
#endif
  case 4: movl(dst, src); break;
  case 2: movw(dst, src); break;
  case 1: movb(dst, src); break;
  default: ShouldNotReachHere();
  }
}
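// Illustrative sketch (not part of the build; field_addr is a hypothetical
// Address): callers pick the access width at run time from a descriptor, e.g.
//
//   load_sized_value(rax, field_addr, sizeof(jshort), /*is_signed=*/ true);
//     // -> load_signed_short(rax, field_addr), i.e. movswl
//   store_sized_value(field_addr, rax, sizeof(jbyte));
//     // -> movb(field_addr, rax)
//
// On 32-bit, 8-byte values need dst2/src2 for the second machine word.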
void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    movl(as_Address(dst), src);
  } else {
    lea(rscratch, dst);
    movl(Address(rscratch, 0), src);
  }
}

void MacroAssembler::mov32(Register dst, AddressLiteral src) {
  if (reachable(src)) {
    movl(dst, as_Address(src));
  } else {
    lea(dst, src);
    movl(dst, Address(dst, 0));
  }
}

// C++ bool manipulation

void MacroAssembler::movbool(Register dst, Address src) {
  if (sizeof(bool) == 1)
    movb(dst, src);
  else if (sizeof(bool) == 2)
    movw(dst, src);
  else if (sizeof(bool) == 4)
    movl(dst, src);
  else
    // unsupported
    ShouldNotReachHere();
}

void MacroAssembler::movbool(Address dst, bool boolconst) {
  if (sizeof(bool) == 1)
    movb(dst, (int) boolconst);
  else if (sizeof(bool) == 2)
    movw(dst, (int) boolconst);
  else if (sizeof(bool) == 4)
    movl(dst, (int) boolconst);
  else
    // unsupported
    ShouldNotReachHere();
}

void MacroAssembler::movbool(Address dst, Register src) {
  if (sizeof(bool) == 1)
    movb(dst, src);
  else if (sizeof(bool) == 2)
    movw(dst, src);
  else if (sizeof(bool) == 4)
    movl(dst, src);
  else
    // unsupported
    ShouldNotReachHere();
}

void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    movdl(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    movdl(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    movq(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    movq(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    if (UseXmmLoadAndClearUpper) {
      movsd (dst, as_Address(src));
    } else {
      movlpd(dst, as_Address(src));
    }
  } else {
    lea(rscratch, src);
    if (UseXmmLoadAndClearUpper) {
      movsd (dst, Address(rscratch, 0));
    } else {
      movlpd(dst, Address(rscratch, 0));
    }
  }
}

void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    movss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    movss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::movptr(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void MacroAssembler::movptr(Register dst, Address src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Register dst, intptr_t src) {
#ifdef _LP64
  if (is_simm32(src)) {
    movq(dst, checked_cast<int32_t>(src));
  } else {
    mov64(dst, src);
  }
#else
  movl(dst, src);
#endif
}

void MacroAssembler::movptr(Address dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void MacroAssembler::movptr(Address dst, int32_t src) {
  LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src));
}
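// Illustrative note (a sketch, not part of the build): the 64-bit forms above
// sign-extend a 32-bit immediate to the full word, e.g.
//
//   movptr(Address(rsp, 0), -1);   // movslq: stores 0xFFFFFFFFFFFFFFFF
//   movptr(rbx, (intptr_t)-1);     // fits simm32, so a single movq suffices
//
// which is one reason a real pointer constant must travel as an
// AddressLiteral (with relocation) rather than as a plain immediate.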
void MacroAssembler::movdqu(Address dst, XMMRegister src) {
  assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::movdqu(dst, src);
}

void MacroAssembler::movdqu(XMMRegister dst, Address src) {
  assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::movdqu(dst, src);
}

void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::movdqu(dst, src);
}

void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    movdqu(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    movdqu(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
  assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::vmovdqu(dst, src);
}

void MacroAssembler::vmovdqu(XMMRegister dst, Address src) {
  assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::vmovdqu(dst, src);
}

void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::vmovdqu(dst, src);
}

void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vmovdqu(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    vmovdqu(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (vector_len == AVX_512bit) {
    evmovdquq(dst, src, AVX_512bit, rscratch);
  } else if (vector_len == AVX_256bit) {
    vmovdqu(dst, src, rscratch);
  } else {
    movdqu(dst, src, rscratch);
  }
}
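// Illustrative sketch (not part of the build; mask_addr stands in for some
// constant-table address): the dispatch above lets one call site handle any
// vector width, e.g.
//
//   vmovdqu(xmm0, ExternalAddress(mask_addr), AVX_512bit, rscratch1);
//     // -> evmovdquq (EVEX, 64 bytes)
//   vmovdqu(xmm0, ExternalAddress(mask_addr), AVX_128bit, rscratch1);
//     // -> movdqu (16 bytes)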
"missing"); 2833 2834 if (reachable(src)) { 2835 Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len); 2836 } else { 2837 lea(rscratch, src); 2838 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len); 2839 } 2840 } 2841 2842 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2843 assert(rscratch != noreg || always_reachable(src), "missing"); 2844 2845 if (reachable(src)) { 2846 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len); 2847 } else { 2848 lea(rscratch, src); 2849 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len); 2850 } 2851 } 2852 2853 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2854 assert(rscratch != noreg || always_reachable(src), "missing"); 2855 2856 if (reachable(src)) { 2857 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len); 2858 } else { 2859 lea(rscratch, src); 2860 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len); 2861 } 2862 } 2863 2864 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2865 assert(rscratch != noreg || always_reachable(src), "missing"); 2866 2867 if (reachable(src)) { 2868 Assembler::evmovdquq(dst, as_Address(src), vector_len); 2869 } else { 2870 lea(rscratch, src); 2871 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len); 2872 } 2873 } 2874 2875 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) { 2876 assert(rscratch != noreg || always_reachable(src), "missing"); 2877 2878 if (reachable(src)) { 2879 Assembler::movdqa(dst, as_Address(src)); 2880 } else { 2881 lea(rscratch, src); 2882 Assembler::movdqa(dst, Address(rscratch, 0)); 2883 } 2884 } 2885 2886 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2887 assert(rscratch != noreg || always_reachable(src), "missing"); 2888 2889 if (reachable(src)) { 2890 Assembler::movsd(dst, as_Address(src)); 2891 } else { 2892 lea(rscratch, src); 2893 Assembler::movsd(dst, Address(rscratch, 0)); 2894 } 2895 } 2896 2897 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2898 assert(rscratch != noreg || always_reachable(src), "missing"); 2899 2900 if (reachable(src)) { 2901 Assembler::movss(dst, as_Address(src)); 2902 } else { 2903 lea(rscratch, src); 2904 Assembler::movss(dst, Address(rscratch, 0)); 2905 } 2906 } 2907 2908 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) { 2909 assert(rscratch != noreg || always_reachable(src), "missing"); 2910 2911 if (reachable(src)) { 2912 Assembler::movddup(dst, as_Address(src)); 2913 } else { 2914 lea(rscratch, src); 2915 Assembler::movddup(dst, Address(rscratch, 0)); 2916 } 2917 } 2918 2919 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2920 assert(rscratch != noreg || always_reachable(src), "missing"); 2921 2922 if (reachable(src)) { 2923 Assembler::vmovddup(dst, as_Address(src), vector_len); 2924 } else { 2925 lea(rscratch, src); 2926 Assembler::vmovddup(dst, Address(rscratch, 0), vector_len); 2927 } 2928 } 2929 2930 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2931 assert(rscratch != noreg || always_reachable(src), "missing"); 2932 2933 if (reachable(src)) { 2934 Assembler::mulsd(dst, 
void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
                               int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len);
  }
}

void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
                               int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len);
  }
}

void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len);
  }
}

void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len);
  }
}

void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evmovdquq(dst, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::movdqa(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::movdqa(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::movsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::movsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::movss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::movss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::movddup(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::movddup(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vmovddup(dst, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vmovddup(dst, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::mulsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::mulsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::mulss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::mulss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS null exception if reg is null by
    // accessing M[reg] w/o changing any (non-CC) registers
    // NOTE: cmpl is plenty here to provoke a segv
    cmpptr(rax, Address(reg, 0));
    // Note: should probably use testl(rax, Address(reg, 0));
    //       may be shorter code (however, this version of
    //       testl needs to be implemented first)
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS null exception if reg is null
  }
}

void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
  andptr(markword, markWord::inline_type_mask_in_place);
  cmpptr(markword, markWord::inline_type_pattern);
  jcc(Assembler::equal, is_inline_type);
}

void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) {
  movl(temp_reg, Address(klass, Klass::access_flags_offset()));
  testl(temp_reg, JVM_ACC_IDENTITY);
  jcc(Assembler::zero, is_inline_type);
}

void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) {
  testptr(object, object);
  jcc(Assembler::zero, not_inline_type);
  const int is_inline_type_mask = markWord::inline_type_pattern;
  movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
  andptr(tmp, is_inline_type_mask);
  cmpptr(tmp, is_inline_type_mask);
  jcc(Assembler::notEqual, not_inline_type);
}

void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) {
#ifdef ASSERT
  {
    Label done_check;
    test_klass_is_inline_type(klass, temp_reg, done_check);
    stop("test_klass_is_empty_inline_type with non inline type klass");
    bind(done_check);
  }
#endif
  movl(temp_reg, Address(klass, InstanceKlass::misc_flags_offset()));
  testl(temp_reg, InstanceKlassFlags::is_empty_inline_type_value());
  jcc(Assembler::notZero, is_empty_inline_type);
}

void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
  movl(temp_reg, flags);
  testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
  jcc(Assembler::notEqual, is_null_free_inline_type);
}

void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
  movl(temp_reg, flags);
  testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
  jcc(Assembler::equal, not_null_free_inline_type);
}

void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
  movl(temp_reg, flags);
  testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift);
  jcc(Assembler::notEqual, is_flat);
}
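// Illustrative sketch (not part of the build): the markWord tests above are
// all variations of
//
//   (mark & markWord::inline_type_mask_in_place) == markWord::inline_type_pattern
//
// Note that test_markword_is_inline_type masks its markword register in
// place, so the caller's copy of the mark word is consumed by the check.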
void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
  Label test_mark_word;
  // load mark word
  movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
  // if the unlocked bit is set, the mark word can be tested as-is
  testl(temp_reg, markWord::unlocked_value);
  jccb(Assembler::notZero, test_mark_word);
  // slow path: mark word is displaced, use the klass prototype header
  push(rscratch1);
  load_prototype_header(temp_reg, oop, rscratch1);
  pop(rscratch1);

  bind(test_mark_word);
  testl(temp_reg, test_bit);
  jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
}

void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
                                         Label& is_flat_array) {
#ifdef _LP64
  test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
#else
  load_klass(temp_reg, oop, noreg);
  movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
  test_flat_array_layout(temp_reg, is_flat_array);
#endif
}

void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
                                             Label& is_non_flat_array) {
#ifdef _LP64
  test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
#else
  load_klass(temp_reg, oop, noreg);
  movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
  test_non_flat_array_layout(temp_reg, is_non_flat_array);
#endif
}

void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
#ifdef _LP64
  test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
#else
  load_klass(temp_reg, oop, noreg);
  movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
  test_null_free_array_layout(temp_reg, is_null_free_array);
#endif
}

void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) {
#ifdef _LP64
  test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
#else
  load_klass(temp_reg, oop, noreg);
  movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
  test_non_null_free_array_layout(temp_reg, is_non_null_free_array);
#endif
}

void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
  testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
  jcc(Assembler::notZero, is_flat_array);
}

void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
  testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
  jcc(Assembler::zero, is_non_flat_array);
}

void MacroAssembler::test_null_free_array_layout(Register lh, Label& is_null_free_array) {
  testl(lh, Klass::_lh_null_free_array_bit_inplace);
  jcc(Assembler::notZero, is_null_free_array);
}

void MacroAssembler::test_non_null_free_array_layout(Register lh, Label& is_non_null_free_array) {
  testl(lh, Klass::_lh_null_free_array_bit_inplace);
  jcc(Assembler::zero, is_non_null_free_array);
}
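// Illustrative note (a sketch, not part of the build): on 64-bit the array
// property bits ride in the mark-word prototype, so for example
//
//   test_flat_array_oop(obj, tmp, is_flat);
//     // unlocked: testl(mark, markWord::flat_array_bit_in_place)
//     // locked:   reload the prototype header from the klass first
//
// while the 32-bit fallback uses the layout-helper bit tests just above.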

void MacroAssembler::os_breakpoint() {
  // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
  // (e.g., MSVC can't call ps() otherwise)
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}

void MacroAssembler::unimplemented(const char* what) {
  const char* buf = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}

#ifdef _LP64
#define XSTATE_BV 0x200
#endif

void MacroAssembler::pop_CPU_state() {
  pop_FPU_state();
  pop_IU_state();
}

void MacroAssembler::pop_FPU_state() {
#ifndef _LP64
  frstor(Address(rsp, 0));
#else
  fxrstor(Address(rsp, 0));
#endif
  addptr(rsp, FPUStateSizeInWords * wordSize);
}

void MacroAssembler::pop_IU_state() {
  popa();
  LP64_ONLY(addq(rsp, 8));
  popf();
}

// Save Integer and Float state
// Warning: Stack must be 16 byte aligned (64bit)
void MacroAssembler::push_CPU_state() {
  push_IU_state();
  push_FPU_state();
}

void MacroAssembler::push_FPU_state() {
  subptr(rsp, FPUStateSizeInWords * wordSize);
#ifndef _LP64
  fnsave(Address(rsp, 0));
  fwait();
#else
  fxsave(Address(rsp, 0));
#endif // LP64
}

void MacroAssembler::push_IU_state() {
  // Push flags first because pusha kills them
  pushf();
  // Make sure rsp stays 16-byte aligned
  LP64_ONLY(subq(rsp, 8));
  pusha();
}
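// Illustrative note (a sketch, not part of the build): on 64-bit the pusha
// helper saves the 16 general registers (128 bytes, alignment-neutral), so
// after pushf (8 bytes) the extra subq restores the 16-byte alignment that
// the fxsave done by push_FPU_state requires:
//
//   pushf();        // rsp -= 8
//   subq(rsp, 8);   // re-align rsp to 16
//   pusha();        // rsp -= 16 * 8, alignment preserved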
void MacroAssembler::push_cont_fastpath() {
  if (!Continuations::enabled()) return;

#ifndef _LP64
  Register rthread = rax;
  Register rrealsp = rbx;
  push(rthread);
  push(rrealsp);

  get_thread(rthread);

  // The code below wants the original RSP.
  // Move it back after the pushes above.
  movptr(rrealsp, rsp);
  addptr(rrealsp, 2*wordSize);
#else
  Register rthread = r15_thread;
  Register rrealsp = rsp;
#endif

  Label done;
  cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset()));
  jccb(Assembler::belowEqual, done);
  movptr(Address(rthread, JavaThread::cont_fastpath_offset()), rrealsp);
  bind(done);

#ifndef _LP64
  pop(rrealsp);
  pop(rthread);
#endif
}

void MacroAssembler::pop_cont_fastpath() {
  if (!Continuations::enabled()) return;

#ifndef _LP64
  Register rthread = rax;
  Register rrealsp = rbx;
  push(rthread);
  push(rrealsp);

  get_thread(rthread);

  // The code below wants the original RSP.
  // Move it back after the pushes above.
  movptr(rrealsp, rsp);
  addptr(rrealsp, 2*wordSize);
#else
  Register rthread = r15_thread;
  Register rrealsp = rsp;
#endif

  Label done;
  cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset()));
  jccb(Assembler::below, done);
  movptr(Address(rthread, JavaThread::cont_fastpath_offset()), 0);
  bind(done);

#ifndef _LP64
  pop(rrealsp);
  pop(rthread);
#endif
}

void MacroAssembler::inc_held_monitor_count() {
#ifndef _LP64
  Register thread = rax;
  push(thread);
  get_thread(thread);
  incrementl(Address(thread, JavaThread::held_monitor_count_offset()));
  pop(thread);
#else // LP64
  incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
#endif
}

void MacroAssembler::dec_held_monitor_count() {
#ifndef _LP64
  Register thread = rax;
  push(thread);
  get_thread(thread);
  decrementl(Address(thread, JavaThread::held_monitor_count_offset()));
  pop(thread);
#else // LP64
  decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
#endif
}

#ifdef ASSERT
void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
#ifdef _LP64
  Label no_cont;
  movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset()));
  testl(cont, cont);
  jcc(Assembler::zero, no_cont);
  stop(name);
  bind(no_cont);
#else
  Unimplemented();
#endif
}
#endif

void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // we must set sp to zero to clear frame
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }
  // Always clear the pc because it could have been set by make_walkable()
  movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
  vzeroupper();
}

void MacroAssembler::restore_rax(Register tmp) {
  if (tmp == noreg) pop(rax);
  else if (tmp != rax) mov(rax, tmp);
}

void MacroAssembler::round_to(Register reg, int modulus) {
  addptr(reg, modulus - 1);
  andptr(reg, -modulus);
}

void MacroAssembler::save_rax(Register tmp) {
  if (tmp == noreg) push(rax);
  else if (tmp != rax) mov(tmp, rax);
}

void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod) {
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use rsp instead to perform the stack watermark check.
    cmpptr(in_nmethod ? rsp : rbp, Address(thread_reg, JavaThread::polling_word_offset()));
    jcc(Assembler::above, slow_path);
    return;
  }
  testb(Address(thread_reg, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
  jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
}
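// Illustrative sketch (not part of the build): at a return the poll compares
// the stack pointer against the per-thread polling word, which doubles as a
// stack watermark -- an armed value always compares above the frame pointer:
//
//   cmpptr(rsp, Address(thread, JavaThread::polling_word_offset()));
//   jcc(Assembler::above, slow_path);   // armed, or watermark not yet passed
//
// while non-return polls only need to test the low poll bit with testb.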
// Calls to C land
//
// When entering C land, the rbp & rsp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register java_thread,
                                         Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register rscratch) {
  vzeroupper();
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
  }
  // last_java_pc is optional
  if (last_java_pc != nullptr) {
    Address java_pc(java_thread,
                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
    lea(java_pc, InternalAddress(last_java_pc), rscratch);
  }
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}

void MacroAssembler::shlptr(Register dst, int imm8) {
  LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
}

void MacroAssembler::shrptr(Register dst, int imm8) {
  LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
}

void MacroAssembler::sign_extend_byte(Register reg) {
  if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
    movsbl(reg, reg); // movsxb
  } else {
    shll(reg, 24);
    sarl(reg, 24);
  }
}

void MacroAssembler::sign_extend_short(Register reg) {
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    movswl(reg, reg); // movsxw
  } else {
    shll(reg, 16);
    sarl(reg, 16);
  }
}

void MacroAssembler::testl(Address dst, int32_t imm32) {
  if (imm32 >= 0 && is8bit(imm32)) {
    testb(dst, imm32);
  } else {
    Assembler::testl(dst, imm32);
  }
}

void MacroAssembler::testl(Register dst, int32_t imm32) {
  if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) {
    testb(dst, imm32);
  } else {
    Assembler::testl(dst, imm32);
  }
}

void MacroAssembler::testl(Register dst, AddressLiteral src) {
  assert(always_reachable(src), "Address should be reachable");
  testl(dst, as_Address(src));
}

#ifdef _LP64

void MacroAssembler::testq(Address dst, int32_t imm32) {
  if (imm32 >= 0) {
    testl(dst, imm32);
  } else {
    Assembler::testq(dst, imm32);
  }
}

void MacroAssembler::testq(Register dst, int32_t imm32) {
  if (imm32 >= 0) {
    testl(dst, imm32);
  } else {
    Assembler::testq(dst, imm32);
  }
}

#endif
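// Illustrative note (a sketch, not part of the build): testq only narrows to
// testl for non-negative immediates because testq sign-extends its imm32
// while the 32-bit form's mask has zero upper bits, e.g.
//
//   testq(rax, 0x40);   // -> testl, then testb: same flags on the low bits
//   testq(rax, -1);     // must stay testq: the imm sign-extends to 64 bits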
void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::pcmpeqb(dst, src);
}

void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::pcmpeqw(dst, src);
}

void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  assert((dst->encoding() < 16), "XMM register should be 0-15");
  Assembler::pcmpestri(dst, src, imm8);
}

void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  assert((dst->encoding() < 16 && src->encoding() < 16), "XMM register should be 0-15");
  Assembler::pcmpestri(dst, src, imm8);
}

void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::pmovzxbw(dst, src);
}

void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) {
  assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::pmovzxbw(dst, src);
}

void MacroAssembler::pmovmskb(Register dst, XMMRegister src) {
  assert((src->encoding() < 16), "XMM register should be 0-15");
  Assembler::pmovmskb(dst, src);
}

void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) {
  assert((dst->encoding() < 16 && src->encoding() < 16), "XMM register should be 0-15");
  Assembler::ptest(dst, src);
}

void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::sqrtss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::sqrtss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::subsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::subsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::roundsd(dst, as_Address(src), rmode);
  } else {
    lea(rscratch, src);
    Assembler::roundsd(dst, Address(rscratch, 0), rmode);
  }
}

void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::subss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::subss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::ucomisd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::ucomisd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::ucomiss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::ucomiss(dst, Address(rscratch, 0));
  }
}
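// Illustrative note (a sketch, not part of the build): the encoding() < 16
// asserts in the wrappers above exist because legacy SSE and VEX forms can
// only name xmm0-xmm15; xmm16-xmm31 need an EVEX encoding, which for 128- and
// 256-bit operands in turn requires AVX512VL (plus BW or DQ where the assert
// checks supports_avx512vlbw()/supports_avx512vldq()).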
void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  // Used in sign-bit flipping with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::xorpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::xorpd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
  if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) {
    Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
  } else {
    Assembler::xorpd(dst, src);
  }
}

void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) {
  if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) {
    Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
  } else {
    Assembler::xorps(dst, src);
  }
}

void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  // Used in sign-bit flipping with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::xorps(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::xorps(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  // Used in sign-bit flipping with aligned address.
  bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
  assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::pshufb(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::pshufb(dst, Address(rscratch, 0));
  }
}
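// Illustrative note (a sketch, not part of the build): with AVX-512 but no
// AVX512DQ there is no EVEX encoding of xorpd/xorps, so the register self-xor
// (dst == src), the usual clearing idiom, is routed through the integer vpxor
// at 512 bits above; that way the full zmm register is cleared even when its
// upper bits are live.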
Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len); 3622 } 3623 } 3624 3625 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 3626 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3627 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 3628 3629 vandps(dst, nds, negate_field, vector_len, rscratch); 3630 } 3631 3632 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 3633 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3634 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 3635 3636 vandpd(dst, nds, negate_field, vector_len, rscratch); 3637 } 3638 3639 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3640 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3641 Assembler::vpaddb(dst, nds, src, vector_len); 3642 } 3643 3644 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3645 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3646 Assembler::vpaddb(dst, nds, src, vector_len); 3647 } 3648 3649 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3650 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3651 Assembler::vpaddw(dst, nds, src, vector_len); 3652 } 3653 3654 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3655 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3656 Assembler::vpaddw(dst, nds, src, vector_len); 3657 } 3658 3659 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3660 assert(rscratch != noreg || always_reachable(src), "missing"); 3661 3662 if (reachable(src)) { 3663 Assembler::vpand(dst, nds, as_Address(src), vector_len); 3664 } else { 3665 lea(rscratch, src); 3666 Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len); 3667 } 3668 } 3669 3670 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3671 assert(rscratch != noreg || always_reachable(src), "missing"); 3672 3673 if (reachable(src)) { 3674 Assembler::vpbroadcastd(dst, as_Address(src), vector_len); 3675 } else { 3676 lea(rscratch, src); 3677 Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len); 3678 } 3679 } 3680 3681 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3682 assert(rscratch != noreg || always_reachable(src), "missing"); 3683 3684 if (reachable(src)) { 3685 Assembler::vpbroadcastq(dst, as_Address(src), vector_len); 3686 } else { 3687 lea(rscratch, src); 3688 Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len); 3689 } 3690 } 3691 3692 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3693 
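// Broadcast one 64-bit double from the literal into every lane of dst.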
assert(rscratch != noreg || always_reachable(src), "missing"); 3694 3695 if (reachable(src)) { 3696 Assembler::vbroadcastsd(dst, as_Address(src), vector_len); 3697 } else { 3698 lea(rscratch, src); 3699 Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len); 3700 } 3701 } 3702 3703 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3704 assert(rscratch != noreg || always_reachable(src), "missing"); 3705 3706 if (reachable(src)) { 3707 Assembler::vbroadcastss(dst, as_Address(src), vector_len); 3708 } else { 3709 lea(rscratch, src); 3710 Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len); 3711 } 3712 } 3713 3714 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3715 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3716 Assembler::vpcmpeqb(dst, nds, src, vector_len); 3717 } 3718 3719 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3720 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3721 Assembler::vpcmpeqw(dst, nds, src, vector_len); 3722 } 3723 3724 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3725 assert(rscratch != noreg || always_reachable(src), "missing"); 3726 3727 if (reachable(src)) { 3728 Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len); 3729 } else { 3730 lea(rscratch, src); 3731 Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len); 3732 } 3733 } 3734 3735 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3736 int comparison, bool is_signed, int vector_len, Register rscratch) { 3737 assert(rscratch != noreg || always_reachable(src), "missing"); 3738 3739 if (reachable(src)) { 3740 Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3741 } else { 3742 lea(rscratch, src); 3743 Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3744 } 3745 } 3746 3747 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3748 int comparison, bool is_signed, int vector_len, Register rscratch) { 3749 assert(rscratch != noreg || always_reachable(src), "missing"); 3750 3751 if (reachable(src)) { 3752 Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3753 } else { 3754 lea(rscratch, src); 3755 Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3756 } 3757 } 3758 3759 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3760 int comparison, bool is_signed, int vector_len, Register rscratch) { 3761 assert(rscratch != noreg || always_reachable(src), "missing"); 3762 3763 if (reachable(src)) { 3764 Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3765 } else { 3766 lea(rscratch, src); 3767 Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3768 } 3769 } 3770 3771 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3772 int comparison, bool is_signed, int vector_len, Register rscratch) { 3773 
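// EVEX compares produce a predicate in the opmask register kdst (write-masked by 'mask');
// 'comparison' encodes the predicate and 'is_signed' selects the signed vs. unsigned form.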
assert(rscratch != noreg || always_reachable(src), "missing"); 3774 3775 if (reachable(src)) { 3776 Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3777 } else { 3778 lea(rscratch, src); 3779 Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3780 } 3781 } 3782 3783 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) { 3784 if (width == Assembler::Q) { 3785 Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len); 3786 } else { 3787 Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len); 3788 } 3789 } 3790 3791 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) { 3792 int eq_cond_enc = 0x29; 3793 int gt_cond_enc = 0x37; 3794 if (width != Assembler::Q) { 3795 eq_cond_enc = 0x74 + width; 3796 gt_cond_enc = 0x64 + width; 3797 } 3798 switch (cond) { 3799 case eq: 3800 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3801 break; 3802 case neq: 3803 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3804 vallones(xtmp, vector_len); 3805 vpxor(dst, xtmp, dst, vector_len); 3806 break; 3807 case le: 3808 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3809 vallones(xtmp, vector_len); 3810 vpxor(dst, xtmp, dst, vector_len); 3811 break; 3812 case nlt: 3813 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3814 vallones(xtmp, vector_len); 3815 vpxor(dst, xtmp, dst, vector_len); 3816 break; 3817 case lt: 3818 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3819 break; 3820 case nle: 3821 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3822 break; 3823 default: 3824 assert(false, "Should not reach here"); 3825 } 3826 } 3827 3828 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) { 3829 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3830 Assembler::vpmovzxbw(dst, src, vector_len); 3831 } 3832 3833 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) { 3834 assert((src->encoding() < 16),"XMM register should be 0-15"); 3835 Assembler::vpmovmskb(dst, src, vector_len); 3836 } 3837 3838 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3839 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3840 Assembler::vpmullw(dst, nds, src, vector_len); 3841 } 3842 3843 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3844 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3845 Assembler::vpmullw(dst, nds, src, vector_len); 3846 } 3847 3848 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3849 assert((UseAVX > 0), "AVX support is needed"); 3850 assert(rscratch != noreg || always_reachable(src), "missing"); 3851 3852 if (reachable(src)) { 3853 Assembler::vpmulld(dst, nds, as_Address(src), vector_len); 3854 } else { 3855 lea(rscratch, src); 3856 Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len); 3857 } 3858 } 3859 3860 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3861 assert(((dst->encoding() < 16 && 
src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3862 Assembler::vpsubb(dst, nds, src, vector_len); 3863 } 3864 3865 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3866 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3867 Assembler::vpsubb(dst, nds, src, vector_len); 3868 } 3869 3870 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3871 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3872 Assembler::vpsubw(dst, nds, src, vector_len); 3873 } 3874 3875 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3876 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3877 Assembler::vpsubw(dst, nds, src, vector_len); 3878 } 3879 3880 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3881 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3882 Assembler::vpsraw(dst, nds, shift, vector_len); 3883 } 3884 3885 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3886 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3887 Assembler::vpsraw(dst, nds, shift, vector_len); 3888 } 3889 3890 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3891 assert(UseAVX > 2,""); 3892 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3893 vector_len = 2; 3894 } 3895 Assembler::evpsraq(dst, nds, shift, vector_len); 3896 } 3897 3898 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3899 assert(UseAVX > 2,""); 3900 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3901 vector_len = 2; 3902 } 3903 Assembler::evpsraq(dst, nds, shift, vector_len); 3904 } 3905 3906 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3907 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3908 Assembler::vpsrlw(dst, nds, shift, vector_len); 3909 } 3910 3911 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3912 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3913 Assembler::vpsrlw(dst, nds, shift, vector_len); 3914 } 3915 3916 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3917 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3918 Assembler::vpsllw(dst, nds, shift, vector_len); 3919 } 3920 3921 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3922 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3923 Assembler::vpsllw(dst, nds, shift, vector_len); 3924 } 3925 3926 void MacroAssembler::vptest(XMMRegister dst, 
XMMRegister src) { 3927 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3928 Assembler::vptest(dst, src); 3929 } 3930 3931 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) { 3932 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3933 Assembler::punpcklbw(dst, src); 3934 } 3935 3936 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) { 3937 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 3938 Assembler::pshufd(dst, src, mode); 3939 } 3940 3941 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { 3942 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3943 Assembler::pshuflw(dst, src, mode); 3944 } 3945 3946 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3947 assert(rscratch != noreg || always_reachable(src), "missing"); 3948 3949 if (reachable(src)) { 3950 vandpd(dst, nds, as_Address(src), vector_len); 3951 } else { 3952 lea(rscratch, src); 3953 vandpd(dst, nds, Address(rscratch, 0), vector_len); 3954 } 3955 } 3956 3957 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3958 assert(rscratch != noreg || always_reachable(src), "missing"); 3959 3960 if (reachable(src)) { 3961 vandps(dst, nds, as_Address(src), vector_len); 3962 } else { 3963 lea(rscratch, src); 3964 vandps(dst, nds, Address(rscratch, 0), vector_len); 3965 } 3966 } 3967 3968 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, 3969 bool merge, int vector_len, Register rscratch) { 3970 assert(rscratch != noreg || always_reachable(src), "missing"); 3971 3972 if (reachable(src)) { 3973 Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len); 3974 } else { 3975 lea(rscratch, src); 3976 Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 3977 } 3978 } 3979 3980 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3981 assert(rscratch != noreg || always_reachable(src), "missing"); 3982 3983 if (reachable(src)) { 3984 vdivsd(dst, nds, as_Address(src)); 3985 } else { 3986 lea(rscratch, src); 3987 vdivsd(dst, nds, Address(rscratch, 0)); 3988 } 3989 } 3990 3991 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3992 assert(rscratch != noreg || always_reachable(src), "missing"); 3993 3994 if (reachable(src)) { 3995 vdivss(dst, nds, as_Address(src)); 3996 } else { 3997 lea(rscratch, src); 3998 vdivss(dst, nds, Address(rscratch, 0)); 3999 } 4000 } 4001 4002 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4003 assert(rscratch != noreg || always_reachable(src), "missing"); 4004 4005 if (reachable(src)) { 4006 vmulsd(dst, nds, as_Address(src)); 4007 } else { 4008 lea(rscratch, src); 4009 vmulsd(dst, nds, Address(rscratch, 0)); 4010 } 4011 } 4012 4013 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4014 assert(rscratch != noreg || always_reachable(src), "missing"); 4015 4016 if (reachable(src)) { 4017 vmulss(dst, nds, as_Address(src)); 4018 } else { 4019 lea(rscratch, src); 4020 vmulss(dst, nds, Address(rscratch, 0)); 4021 } 4022 } 
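// All AddressLiteral wrappers in this file share the shape sketched below: use
// the literal directly when it is rip-relative reachable, otherwise materialize
// its address into the scratch register first (illustrative sketch; 'op' is a
// placeholder, not a real instruction):
//
//   void MacroAssembler::op(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
//     assert(rscratch != noreg || always_reachable(src), "missing");
//     if (reachable(src)) {
//       op(dst, nds, as_Address(src));       // rip-relative memory operand
//     } else {
//       lea(rscratch, src);                  // materialize the 64-bit address
//       op(dst, nds, Address(rscratch, 0));  // register-indirect operand
//     }
//   }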
4023 4024 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4025 assert(rscratch != noreg || always_reachable(src), "missing"); 4026 4027 if (reachable(src)) { 4028 vsubsd(dst, nds, as_Address(src)); 4029 } else { 4030 lea(rscratch, src); 4031 vsubsd(dst, nds, Address(rscratch, 0)); 4032 } 4033 } 4034 4035 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4036 assert(rscratch != noreg || always_reachable(src), "missing"); 4037 4038 if (reachable(src)) { 4039 vsubss(dst, nds, as_Address(src)); 4040 } else { 4041 lea(rscratch, src); 4042 vsubss(dst, nds, Address(rscratch, 0)); 4043 } 4044 } 4045 4046 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4047 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 4048 assert(rscratch != noreg || always_reachable(src), "missing"); 4049 4050 vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch); 4051 } 4052 4053 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4054 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 4055 assert(rscratch != noreg || always_reachable(src), "missing"); 4056 4057 vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch); 4058 } 4059 4060 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 4061 assert(rscratch != noreg || always_reachable(src), "missing"); 4062 4063 if (reachable(src)) { 4064 vxorpd(dst, nds, as_Address(src), vector_len); 4065 } else { 4066 lea(rscratch, src); 4067 vxorpd(dst, nds, Address(rscratch, 0), vector_len); 4068 } 4069 } 4070 4071 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 4072 assert(rscratch != noreg || always_reachable(src), "missing"); 4073 4074 if (reachable(src)) { 4075 vxorps(dst, nds, as_Address(src), vector_len); 4076 } else { 4077 lea(rscratch, src); 4078 vxorps(dst, nds, Address(rscratch, 0), vector_len); 4079 } 4080 } 4081 4082 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 4083 assert(rscratch != noreg || always_reachable(src), "missing"); 4084 4085 if (UseAVX > 1 || (vector_len < 1)) { 4086 if (reachable(src)) { 4087 Assembler::vpxor(dst, nds, as_Address(src), vector_len); 4088 } else { 4089 lea(rscratch, src); 4090 Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len); 4091 } 4092 } else { 4093 MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch); 4094 } 4095 } 4096 4097 void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 4098 assert(rscratch != noreg || always_reachable(src), "missing"); 4099 4100 if (reachable(src)) { 4101 Assembler::vpermd(dst, nds, as_Address(src), vector_len); 4102 } else { 4103 lea(rscratch, src); 4104 Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len); 4105 } 4106 } 4107 4108 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) { 4109 const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask); 4110 STATIC_ASSERT(inverted_mask == -4); // otherwise check this code 4111 // The inverted mask is sign-extended 4112 andptr(possibly_non_local, inverted_mask); 4113 } 4114 4115 void 
MacroAssembler::resolve_jobject(Register value, 4116 Register thread, 4117 Register tmp) { 4118 assert_different_registers(value, thread, tmp); 4119 Label done, tagged, weak_tagged; 4120 testptr(value, value); 4121 jcc(Assembler::zero, done); // Use null as-is. 4122 testptr(value, JNIHandles::tag_mask); // Test for tag. 4123 jcc(Assembler::notZero, tagged); 4124 4125 // Resolve local handle 4126 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp, thread); 4127 verify_oop(value); 4128 jmp(done); 4129 4130 bind(tagged); 4131 testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag. 4132 jcc(Assembler::notZero, weak_tagged); 4133 4134 // Resolve global handle 4135 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread); 4136 verify_oop(value); 4137 jmp(done); 4138 4139 bind(weak_tagged); 4140 // Resolve jweak. 4141 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 4142 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp, thread); 4143 verify_oop(value); 4144 4145 bind(done); 4146 } 4147 4148 void MacroAssembler::resolve_global_jobject(Register value, 4149 Register thread, 4150 Register tmp) { 4151 assert_different_registers(value, thread, tmp); 4152 Label done; 4153 4154 testptr(value, value); 4155 jcc(Assembler::zero, done); // Use null as-is. 4156 4157 #ifdef ASSERT 4158 { 4159 Label valid_global_tag; 4160 testptr(value, JNIHandles::TypeTag::global); // Test for global tag. 4161 jcc(Assembler::notZero, valid_global_tag); 4162 stop("non global jobject using resolve_global_jobject"); 4163 bind(valid_global_tag); 4164 } 4165 #endif 4166 4167 // Resolve global handle 4168 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread); 4169 verify_oop(value); 4170 4171 bind(done); 4172 } 4173 4174 void MacroAssembler::subptr(Register dst, int32_t imm32) { 4175 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32)); 4176 } 4177 4178 // Force generation of a 4 byte immediate value even if it fits into 8bit 4179 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) { 4180 LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32)); 4181 } 4182 4183 void MacroAssembler::subptr(Register dst, Register src) { 4184 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); 4185 } 4186 4187 // C++ bool manipulation 4188 void MacroAssembler::testbool(Register dst) { 4189 if(sizeof(bool) == 1) 4190 testb(dst, 0xff); 4191 else if(sizeof(bool) == 2) { 4192 // testw implementation needed for two byte bools 4193 ShouldNotReachHere(); 4194 } else if(sizeof(bool) == 4) 4195 testl(dst, dst); 4196 else 4197 // unsupported 4198 ShouldNotReachHere(); 4199 } 4200 4201 void MacroAssembler::testptr(Register dst, Register src) { 4202 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src)); 4203 } 4204 4205 // Object / value buffer allocation... 
4206 // 4207 // Kills klass and rsi on LP64 4208 void MacroAssembler::allocate_instance(Register klass, Register new_obj, 4209 Register t1, Register t2, 4210 bool clear_fields, Label& alloc_failed) 4211 { 4212 Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop; 4213 Register layout_size = t1; 4214 assert(new_obj == rax, "needs to be rax"); 4215 assert_different_registers(klass, new_obj, t1, t2); 4216 4217 // get instance_size in InstanceKlass (scaled to a count of bytes) 4218 movl(layout_size, Address(klass, Klass::layout_helper_offset())); 4219 // test to see if it has a finalizer or is malformed in some way 4220 testl(layout_size, Klass::_lh_instance_slow_path_bit); 4221 jcc(Assembler::notZero, slow_case_no_pop); 4222 4223 // Allocate the instance: 4224 // If TLAB is enabled: 4225 // Try to allocate in the TLAB. 4226 // If fails, go to the slow path. 4227 // Else If inline contiguous allocations are enabled: 4228 // Try to allocate in eden. 4229 // If fails due to heap end, go to slow path. 4230 // 4231 // If TLAB is enabled OR inline contiguous is enabled: 4232 // Initialize the allocation. 4233 // Exit. 4234 // 4235 // Go to slow path. 4236 4237 push(klass); 4238 const Register thread = LP64_ONLY(r15_thread) NOT_LP64(klass); 4239 #ifndef _LP64 4240 if (UseTLAB) { 4241 get_thread(thread); 4242 } 4243 #endif // _LP64 4244 4245 if (UseTLAB) { 4246 tlab_allocate(thread, new_obj, layout_size, 0, klass, t2, slow_case); 4247 if (ZeroTLAB || (!clear_fields)) { 4248 // the fields have already been cleared 4249 jmp(initialize_header); 4250 } else { 4251 // initialize both the header and fields 4252 jmp(initialize_object); 4253 } 4254 } else { 4255 jmp(slow_case); 4256 } 4257 4258 // If UseTLAB is true, the object was allocated above and still needs to be initialized. 4259 // Otherwise, we have already jumped to the slow path. 4260 if (UseTLAB) { 4261 if (clear_fields) { 4262 // The object's fields are initialized before its header. If the field area is 4263 // empty, go directly to the header initialization. 4264 bind(initialize_object); 4265 decrement(layout_size, sizeof(oopDesc)); 4266 jcc(Assembler::zero, initialize_header); 4267 4268 // Initialize topmost object field, divide size by 8, check if odd and 4269 // test if zero. 4270 Register zero = klass; 4271 xorl(zero, zero); // use zero reg to clear memory (shorter code) 4272 shrl(layout_size, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd 4273 4274 #ifdef ASSERT 4275 // make sure instance_size was multiple of 8 4276 Label L; 4277 // Ignore partial flag stall after shrl() since it is debug VM 4278 jcc(Assembler::carryClear, L); 4279 stop("object size is not multiple of 2 - adjust this code"); 4280 bind(L); 4281 // must be > 0, no extra check needed here 4282 #endif 4283 4284 // initialize remaining object fields: instance_size was a multiple of 8 4285 { 4286 Label loop; 4287 bind(loop); 4288 movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 1*oopSize), zero); 4289 NOT_LP64(movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 2*oopSize), zero)); 4290 decrement(layout_size); 4291 jcc(Assembler::notZero, loop); 4292 } 4293 } // clear_fields 4294 4295 // initialize object header only. 
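// The header is the mark word (taken from the klass' prototype header) plus the
// klass pointer; with compressed class pointers the adjacent 32-bit klass gap is
// zeroed as well (64-bit only).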
4296 bind(initialize_header); 4297 pop(klass); 4298 Register mark_word = t2; 4299 movptr(mark_word, Address(klass, Klass::prototype_header_offset())); 4300 movptr(Address(new_obj, oopDesc::mark_offset_in_bytes ()), mark_word); 4301 #ifdef _LP64 4302 xorl(rsi, rsi); // use zero reg to clear memory (shorter code) 4303 store_klass_gap(new_obj, rsi); // zero klass gap for compressed oops 4304 #endif 4305 movptr(t2, klass); // preserve klass 4306 store_klass(new_obj, t2, rscratch1); // src klass reg is potentially compressed 4307 4308 jmp(done); 4309 } 4310 4311 bind(slow_case); 4312 pop(klass); 4313 bind(slow_case_no_pop); 4314 jmp(alloc_failed); 4315 4316 bind(done); 4317 } 4318 4319 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 4320 void MacroAssembler::tlab_allocate(Register thread, Register obj, 4321 Register var_size_in_bytes, 4322 int con_size_in_bytes, 4323 Register t1, 4324 Register t2, 4325 Label& slow_case) { 4326 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 4327 bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 4328 } 4329 4330 RegSet MacroAssembler::call_clobbered_gp_registers() { 4331 RegSet regs; 4332 #ifdef _LP64 4333 regs += RegSet::of(rax, rcx, rdx); 4334 #ifndef WINDOWS 4335 regs += RegSet::of(rsi, rdi); 4336 #endif 4337 regs += RegSet::range(r8, r11); 4338 #else 4339 regs += RegSet::of(rax, rcx, rdx); 4340 #endif 4341 return regs; 4342 } 4343 4344 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() { 4345 int num_xmm_registers = XMMRegister::available_xmm_registers(); 4346 #if defined(WINDOWS) && defined(_LP64) 4347 XMMRegSet result = XMMRegSet::range(xmm0, xmm5); 4348 if (num_xmm_registers > 16) { 4349 result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1)); 4350 } 4351 return result; 4352 #else 4353 return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1)); 4354 #endif 4355 } 4356 4357 static int FPUSaveAreaSize = align_up(108, StackAlignmentInBytes); // 108 bytes needed for FPU state by fsave/frstor 4358 4359 #ifndef _LP64 4360 static bool use_x87_registers() { return UseSSE < 2; } 4361 #endif 4362 static bool use_xmm_registers() { return UseSSE >= 1; } 4363 4364 // C1 only ever uses the first double/float of the XMM register. 4365 static int xmm_save_size() { return UseSSE >= 2 ? sizeof(double) : sizeof(float); } 4366 4367 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) { 4368 if (UseSSE == 1) { 4369 masm->movflt(Address(rsp, offset), reg); 4370 } else { 4371 masm->movdbl(Address(rsp, offset), reg); 4372 } 4373 } 4374 4375 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) { 4376 if (UseSSE == 1) { 4377 masm->movflt(reg, Address(rsp, offset)); 4378 } else { 4379 masm->movdbl(reg, Address(rsp, offset)); 4380 } 4381 } 4382 4383 int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers, bool save_fpu, 4384 int& gp_area_size, int& fp_area_size, int& xmm_area_size) { 4385 4386 gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size, 4387 StackAlignmentInBytes); 4388 #ifdef _LP64 4389 fp_area_size = 0; 4390 #else 4391 fp_area_size = (save_fpu && use_x87_registers()) ? FPUSaveAreaSize : 0; 4392 #endif 4393 xmm_area_size = (save_fpu && use_xmm_registers()) ? 
xmm_registers.size() * xmm_save_size() : 0; 4394 4395 return gp_area_size + fp_area_size + xmm_area_size; 4396 } 4397 4398 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) { 4399 block_comment("push_call_clobbered_registers start"); 4400 // Regular registers 4401 RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude; 4402 4403 int gp_area_size; 4404 int fp_area_size; 4405 int xmm_area_size; 4406 int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu, 4407 gp_area_size, fp_area_size, xmm_area_size); 4408 subptr(rsp, total_save_size); 4409 4410 push_set(gp_registers_to_push, 0); 4411 4412 #ifndef _LP64 4413 if (save_fpu && use_x87_registers()) { 4414 fnsave(Address(rsp, gp_area_size)); 4415 fwait(); 4416 } 4417 #endif 4418 if (save_fpu && use_xmm_registers()) { 4419 push_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size); 4420 } 4421 4422 block_comment("push_call_clobbered_registers end"); 4423 } 4424 4425 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) { 4426 block_comment("pop_call_clobbered_registers start"); 4427 4428 RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude; 4429 4430 int gp_area_size; 4431 int fp_area_size; 4432 int xmm_area_size; 4433 int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu, 4434 gp_area_size, fp_area_size, xmm_area_size); 4435 4436 if (restore_fpu && use_xmm_registers()) { 4437 pop_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size); 4438 } 4439 #ifndef _LP64 4440 if (restore_fpu && use_x87_registers()) { 4441 frstor(Address(rsp, gp_area_size)); 4442 } 4443 #endif 4444 4445 pop_set(gp_registers_to_pop, 0); 4446 4447 addptr(rsp, total_save_size); 4448 4449 vzeroupper(); 4450 4451 block_comment("pop_call_clobbered_registers end"); 4452 } 4453 4454 void MacroAssembler::push_set(XMMRegSet set, int offset) { 4455 assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be"); 4456 int spill_offset = offset; 4457 4458 for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) { 4459 save_xmm_register(this, spill_offset, *it); 4460 spill_offset += xmm_save_size(); 4461 } 4462 } 4463 4464 void MacroAssembler::pop_set(XMMRegSet set, int offset) { 4465 int restore_size = set.size() * xmm_save_size(); 4466 assert(is_aligned(restore_size, StackAlignmentInBytes), "must be"); 4467 4468 int restore_offset = offset + restore_size - xmm_save_size(); 4469 4470 for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) { 4471 restore_xmm_register(this, restore_offset, *it); 4472 restore_offset -= xmm_save_size(); 4473 } 4474 } 4475 4476 void MacroAssembler::push_set(RegSet set, int offset) { 4477 int spill_offset; 4478 if (offset == -1) { 4479 int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size; 4480 int aligned_size = align_up(register_push_size, StackAlignmentInBytes); 4481 subptr(rsp, aligned_size); 4482 spill_offset = 0; 4483 } else { 4484 spill_offset = offset; 4485 } 4486 4487 for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) { 4488 movptr(Address(rsp, spill_offset), *it); 4489 spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size; 4490 } 4491 } 4492 4493 void MacroAssembler::pop_set(RegSet set, int offset) { 4494 4495 int gp_reg_size = Register::max_slots_per_register * 
VMRegImpl::stack_slot_size; 4496 int restore_size = set.size() * gp_reg_size; 4497 int aligned_size = align_up(restore_size, StackAlignmentInBytes); 4498 4499 int restore_offset; 4500 if (offset == -1) { 4501 restore_offset = restore_size - gp_reg_size; 4502 } else { 4503 restore_offset = offset + restore_size - gp_reg_size; 4504 } 4505 for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) { 4506 movptr(*it, Address(rsp, restore_offset)); 4507 restore_offset -= gp_reg_size; 4508 } 4509 4510 if (offset == -1) { 4511 addptr(rsp, aligned_size); 4512 } 4513 } 4514 4515 // Preserves the contents of address; destroys the contents of length_in_bytes and temp. 4516 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) { 4517 assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different"); 4518 assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord"); 4519 Label done; 4520 4521 testptr(length_in_bytes, length_in_bytes); 4522 jcc(Assembler::zero, done); 4523 4524 // initialize topmost word, divide index by 2, check if odd and test if zero 4525 // note: for the remaining code to work, index must be a multiple of BytesPerWord 4526 #ifdef ASSERT 4527 { 4528 Label L; 4529 testptr(length_in_bytes, BytesPerWord - 1); 4530 jcc(Assembler::zero, L); 4531 stop("length must be a multiple of BytesPerWord"); 4532 bind(L); 4533 } 4534 #endif 4535 Register index = length_in_bytes; 4536 xorptr(temp, temp); // use _zero reg to clear memory (shorter code) 4537 if (UseIncDec) { 4538 shrptr(index, 3); // divide by 8/16 and set carry flag if bit 2 was set 4539 } else { 4540 shrptr(index, 2); // use 2 instructions to avoid partial flag stall 4541 shrptr(index, 1); 4542 } 4543 #ifndef _LP64 4544 // index might not have been a multiple of 8 (i.e., bit 2 was set) 4545 { 4546 Label even; 4547 // note: if index was a multiple of 8, then it cannot 4548 // be 0 now otherwise it must have been 0 before 4549 // => if it is even, we don't need to check for 0 again 4550 jcc(Assembler::carryClear, even); 4551 // clear topmost word (no jump would be needed if conditional assignment worked here) 4552 movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp); 4553 // index could be 0 now, must check again 4554 jcc(Assembler::zero, done); 4555 bind(even); 4556 } 4557 #endif // !_LP64 4558 // initialize remaining object fields: index is a multiple of 2 now 4559 { 4560 Label loop; 4561 bind(loop); 4562 movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp); 4563 NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);) 4564 decrement(index); 4565 jcc(Assembler::notZero, loop); 4566 } 4567 4568 bind(done); 4569 } 4570 4571 void MacroAssembler::get_inline_type_field_klass(Register klass, Register index, Register inline_klass) { 4572 movptr(inline_klass, Address(klass, InstanceKlass::inline_type_field_klasses_offset())); 4573 #ifdef ASSERT 4574 { 4575 Label done; 4576 cmpptr(inline_klass, 0); 4577 jcc(Assembler::notEqual, done); 4578 stop("get_inline_type_field_klass contains no inline klass"); 4579 bind(done); 4580 } 4581 #endif 4582 movptr(inline_klass, Address(inline_klass, index, Address::times_ptr, Array<InlineKlass*>::base_offset_in_bytes())); 4583 } 4584 4585 void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) { 4586
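// The pre-allocated default value hangs off the inline klass' java mirror: load
// its offset from the InlineKlass fixed block, then the mirror, then the oop at
// that offset within the mirror.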
#ifdef ASSERT 4587 { 4588 Label done_check; 4589 test_klass_is_inline_type(inline_klass, temp_reg, done_check); 4590 stop("get_default_value_oop from non inline type klass"); 4591 bind(done_check); 4592 } 4593 #endif 4594 Register offset = temp_reg; 4595 // Getting the offset of the pre-allocated default value 4596 movptr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset()))); 4597 movl(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset()))); 4598 4599 // Getting the mirror 4600 movptr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset()))); 4601 resolve_oop_handle(obj, inline_klass); 4602 4603 // Getting the pre-allocated default value from the mirror 4604 Address field(obj, offset, Address::times_1); 4605 load_heap_oop(obj, field); 4606 } 4607 4608 void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) { 4609 #ifdef ASSERT 4610 { 4611 Label done_check; 4612 test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check); 4613 stop("get_empty_value from non-empty inline klass"); 4614 bind(done_check); 4615 } 4616 #endif 4617 get_default_value_oop(inline_klass, temp_reg, obj); 4618 } 4619 4620 4621 // Look up the method for a megamorphic invokeinterface call. 4622 // The target method is determined by <intf_klass, itable_index>. 4623 // The receiver klass is in recv_klass. 4624 // On success, the result will be in method_result, and execution falls through. 4625 // On failure, execution transfers to the given label. 4626 void MacroAssembler::lookup_interface_method(Register recv_klass, 4627 Register intf_klass, 4628 RegisterOrConstant itable_index, 4629 Register method_result, 4630 Register scan_temp, 4631 Label& L_no_such_interface, 4632 bool return_method) { 4633 assert_different_registers(recv_klass, intf_klass, scan_temp); 4634 assert_different_registers(method_result, intf_klass, scan_temp); 4635 assert(recv_klass != method_result || !return_method, 4636 "recv_klass can be destroyed when method isn't needed"); 4637 4638 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 4639 "caller must use same register for non-constant itable index as for method"); 4640 4641 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 4642 int vtable_base = in_bytes(Klass::vtable_start_offset()); 4643 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 4644 int scan_step = itableOffsetEntry::size() * wordSize; 4645 int vte_size = vtableEntry::size_in_bytes(); 4646 Address::ScaleFactor times_vte_scale = Address::times_ptr; 4647 assert(vte_size == wordSize, "else adjust times_vte_scale"); 4648 4649 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 4650 4651 // %%% Could store the aligned, prescaled offset in the klassoop. 4652 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base)); 4653 4654 if (return_method) { 4655 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 
4656 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 4657 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off)); 4658 } 4659 4660 // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) { 4661 // if (scan->interface() == intf) { 4662 // result = (klass + scan->offset() + itable_index); 4663 // } 4664 // } 4665 Label search, found_method; 4666 4667 for (int peel = 1; peel >= 0; peel--) { 4668 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset())); 4669 cmpptr(intf_klass, method_result); 4670 4671 if (peel) { 4672 jccb(Assembler::equal, found_method); 4673 } else { 4674 jccb(Assembler::notEqual, search); 4675 // (invert the test to fall through to found_method...) 4676 } 4677 4678 if (!peel) break; 4679 4680 bind(search); 4681 4682 // Check that the previous entry is non-null. A null entry means that 4683 // the receiver class doesn't implement the interface, and wasn't the 4684 // same as when the caller was compiled. 4685 testptr(method_result, method_result); 4686 jcc(Assembler::zero, L_no_such_interface); 4687 addptr(scan_temp, scan_step); 4688 } 4689 4690 bind(found_method); 4691 4692 if (return_method) { 4693 // Got a hit. 4694 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset())); 4695 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1)); 4696 } 4697 } 4698 4699 // Look up the method for a megamorphic invokeinterface call in a single pass over itable: 4700 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICHolder 4701 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index 4702 // The target method is determined by <holder_klass, itable_index>. 4703 // The receiver klass is in recv_klass. 4704 // On success, the result will be in method_result, and execution falls through. 4705 // On failure, execution transfers to the given label. 4706 void MacroAssembler::lookup_interface_method_stub(Register recv_klass, 4707 Register holder_klass, 4708 Register resolved_klass, 4709 Register method_result, 4710 Register scan_temp, 4711 Register temp_reg2, 4712 Register receiver, 4713 int itable_index, 4714 Label& L_no_such_interface) { 4715 assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver); 4716 Register temp_itbl_klass = method_result; 4717 Register temp_reg = (temp_reg2 == noreg ? 
recv_klass : temp_reg2); // reuse recv_klass register on 32-bit x86 impl 4718 4719 int vtable_base = in_bytes(Klass::vtable_start_offset()); 4720 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 4721 int scan_step = itableOffsetEntry::size() * wordSize; 4722 int vte_size = vtableEntry::size_in_bytes(); 4723 int ioffset = in_bytes(itableOffsetEntry::interface_offset()); 4724 int ooffset = in_bytes(itableOffsetEntry::offset_offset()); 4725 Address::ScaleFactor times_vte_scale = Address::times_ptr; 4726 assert(vte_size == wordSize, "adjust times_vte_scale"); 4727 4728 Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found; 4729 4730 // temp_itbl_klass = recv_klass.itable[0] 4731 // scan_temp = &recv_klass.itable[0] + step 4732 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 4733 movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset)); 4734 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step)); 4735 xorptr(temp_reg, temp_reg); 4736 4737 // Initial checks: 4738 // - if (holder_klass != resolved_klass), go to "scan for resolved" 4739 // - if (itable[0] == 0), no such interface 4740 // - if (itable[0] == holder_klass), shortcut to "holder found" 4741 cmpptr(holder_klass, resolved_klass); 4742 jccb(Assembler::notEqual, L_loop_scan_resolved_entry); 4743 testptr(temp_itbl_klass, temp_itbl_klass); 4744 jccb(Assembler::zero, L_no_such_interface); 4745 cmpptr(holder_klass, temp_itbl_klass); 4746 jccb(Assembler::equal, L_holder_found); 4747 4748 // Loop: Look for holder_klass record in itable 4749 // do { 4750 // tmp = itable[index]; 4751 // index += step; 4752 // if (tmp == holder_klass) { 4753 // goto L_holder_found; // Found! 4754 // } 4755 // } while (tmp != 0); 4756 // goto L_no_such_interface // Not found. 4757 Label L_scan_holder; 4758 bind(L_scan_holder); 4759 movptr(temp_itbl_klass, Address(scan_temp, 0)); 4760 addptr(scan_temp, scan_step); 4761 cmpptr(holder_klass, temp_itbl_klass); 4762 jccb(Assembler::equal, L_holder_found); 4763 testptr(temp_itbl_klass, temp_itbl_klass); 4764 jccb(Assembler::notZero, L_scan_holder); 4765 4766 jmpb(L_no_such_interface); 4767 4768 // Loop: Look for resolved_class record in itable 4769 // do { 4770 // tmp = itable[index]; 4771 // index += step; 4772 // if (tmp == holder_klass) { 4773 // // Also check if we have met a holder klass 4774 // holder_tmp = itable[index-step-ioffset]; 4775 // } 4776 // if (tmp == resolved_klass) { 4777 // goto L_resolved_found; // Found! 4778 // } 4779 // } while (tmp != 0); 4780 // goto L_no_such_interface // Not found. 4781 // 4782 Label L_loop_scan_resolved; 4783 bind(L_loop_scan_resolved); 4784 movptr(temp_itbl_klass, Address(scan_temp, 0)); 4785 addptr(scan_temp, scan_step); 4786 bind(L_loop_scan_resolved_entry); 4787 cmpptr(holder_klass, temp_itbl_klass); 4788 cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 4789 cmpptr(resolved_klass, temp_itbl_klass); 4790 jccb(Assembler::equal, L_resolved_found); 4791 testptr(temp_itbl_klass, temp_itbl_klass); 4792 jccb(Assembler::notZero, L_loop_scan_resolved); 4793 4794 jmpb(L_no_such_interface); 4795 4796 Label L_ready; 4797 4798 // See if we already have a holder klass. If not, go and scan for it. 
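// temp_reg is still zero here iff no holder_klass entry was seen during the
// resolved_klass scan (on a holder match the cmovl above captured its offset entry).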
4799 bind(L_resolved_found); 4800 testptr(temp_reg, temp_reg); 4801 jccb(Assembler::zero, L_scan_holder); 4802 jmpb(L_ready); 4803 4804 bind(L_holder_found); 4805 movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 4806 4807 // Finally, temp_reg contains holder_klass vtable offset 4808 bind(L_ready); 4809 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 4810 if (temp_reg2 == noreg) { // recv_klass register is clobbered for 32-bit x86 impl 4811 load_klass(scan_temp, receiver, noreg); 4812 movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 4813 } else { 4814 movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 4815 } 4816 } 4817 4818 4819 // virtual method calling 4820 void MacroAssembler::lookup_virtual_method(Register recv_klass, 4821 RegisterOrConstant vtable_index, 4822 Register method_result) { 4823 const ByteSize base = Klass::vtable_start_offset(); 4824 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below"); 4825 Address vtable_entry_addr(recv_klass, 4826 vtable_index, Address::times_ptr, 4827 base + vtableEntry::method_offset()); 4828 movptr(method_result, vtable_entry_addr); 4829 } 4830 4831 4832 void MacroAssembler::check_klass_subtype(Register sub_klass, 4833 Register super_klass, 4834 Register temp_reg, 4835 Label& L_success) { 4836 Label L_failure; 4837 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr); 4838 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr); 4839 bind(L_failure); 4840 } 4841 4842 4843 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 4844 Register super_klass, 4845 Register temp_reg, 4846 Label* L_success, 4847 Label* L_failure, 4848 Label* L_slow_path, 4849 RegisterOrConstant super_check_offset) { 4850 assert_different_registers(sub_klass, super_klass, temp_reg); 4851 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 4852 if (super_check_offset.is_register()) { 4853 assert_different_registers(sub_klass, super_klass, 4854 super_check_offset.as_register()); 4855 } else if (must_load_sco) { 4856 assert(temp_reg != noreg, "supply either a temp or a register offset"); 4857 } 4858 4859 Label L_fallthrough; 4860 int label_nulls = 0; 4861 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4862 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4863 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; } 4864 assert(label_nulls <= 1, "at most one null in the batch"); 4865 4866 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 4867 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 4868 Address super_check_offset_addr(super_klass, sco_offset); 4869 4870 // Hacked jcc, which "knows" that L_fallthrough, at least, is in 4871 // range of a jccb. If this routine grows larger, reconsider at 4872 // least some of these. 4873 #define local_jcc(assembler_cond, label) \ 4874 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \ 4875 else jcc( assembler_cond, label) /*omit semi*/ 4876 4877 // Hacked jmp, which may only be used just before L_fallthrough. 
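// final_jmp expands to no code when its target is L_fallthrough itself, so the
// straight-line path simply falls through.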
4878 #define final_jmp(label) \ 4879 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 4880 else jmp(label) /*omit semi*/ 4881 4882 // If the pointers are equal, we are done (e.g., String[] elements). 4883 // This self-check enables sharing of secondary supertype arrays among 4884 // non-primary types such as array-of-interface. Otherwise, each such 4885 // type would need its own customized SSA. 4886 // We move this check to the front of the fast path because many 4887 // type checks are in fact trivially successful in this manner, 4888 // so we get a nicely predicted branch right at the start of the check. 4889 cmpptr(sub_klass, super_klass); 4890 local_jcc(Assembler::equal, *L_success); 4891 4892 // Check the supertype display: 4893 if (must_load_sco) { 4894 // Positive movl does right thing on LP64. 4895 movl(temp_reg, super_check_offset_addr); 4896 super_check_offset = RegisterOrConstant(temp_reg); 4897 } 4898 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0); 4899 cmpptr(super_klass, super_check_addr); // load displayed supertype 4900 4901 // This check has worked decisively for primary supers. 4902 // Secondary supers are sought in the super_cache ('super_cache_addr'). 4903 // (Secondary supers are interfaces and very deeply nested subtypes.) 4904 // This works in the same check above because of a tricky aliasing 4905 // between the super_cache and the primary super display elements. 4906 // (The 'super_check_addr' can address either, as the case requires.) 4907 // Note that the cache is updated below if it does not help us find 4908 // what we need immediately. 4909 // So if it was a primary super, we can just fail immediately. 4910 // Otherwise, it's the slow path for us (no success at this point). 4911 4912 if (super_check_offset.is_register()) { 4913 local_jcc(Assembler::equal, *L_success); 4914 cmpl(super_check_offset.as_register(), sc_offset); 4915 if (L_failure == &L_fallthrough) { 4916 local_jcc(Assembler::equal, *L_slow_path); 4917 } else { 4918 local_jcc(Assembler::notEqual, *L_failure); 4919 final_jmp(*L_slow_path); 4920 } 4921 } else if (super_check_offset.as_constant() == sc_offset) { 4922 // Need a slow path; fast failure is impossible. 4923 if (L_slow_path == &L_fallthrough) { 4924 local_jcc(Assembler::equal, *L_success); 4925 } else { 4926 local_jcc(Assembler::notEqual, *L_slow_path); 4927 final_jmp(*L_success); 4928 } 4929 } else { 4930 // No slow path; it's a fast decision. 
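// (super_check_offset is a constant other than sc_offset here, so the display
// comparison above is decisive in both directions.)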
4931 if (L_failure == &L_fallthrough) { 4932 local_jcc(Assembler::equal, *L_success); 4933 } else { 4934 local_jcc(Assembler::notEqual, *L_failure); 4935 final_jmp(*L_success); 4936 } 4937 } 4938 4939 bind(L_fallthrough); 4940 4941 #undef local_jcc 4942 #undef final_jmp 4943 } 4944 4945 4946 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 4947 Register super_klass, 4948 Register temp_reg, 4949 Register temp2_reg, 4950 Label* L_success, 4951 Label* L_failure, 4952 bool set_cond_codes) { 4953 assert_different_registers(sub_klass, super_klass, temp_reg); 4954 if (temp2_reg != noreg) 4955 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg); 4956 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 4957 4958 Label L_fallthrough; 4959 int label_nulls = 0; 4960 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4961 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4962 assert(label_nulls <= 1, "at most one null in the batch"); 4963 4964 // a couple of useful fields in sub_klass: 4965 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 4966 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 4967 Address secondary_supers_addr(sub_klass, ss_offset); 4968 Address super_cache_addr( sub_klass, sc_offset); 4969 4970 // Do a linear scan of the secondary super-klass chain. 4971 // This code is rarely used, so simplicity is a virtue here. 4972 // The repne_scan instruction uses fixed registers, which we must spill. 4973 // Don't worry too much about pre-existing connections with the input regs. 4974 4975 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super) 4976 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter) 4977 4978 // Get super_klass value into rax (even if it was in rdi or rcx). 4979 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false; 4980 if (super_klass != rax) { 4981 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; } 4982 mov(rax, super_klass); 4983 } 4984 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; } 4985 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; } 4986 4987 #ifndef PRODUCT 4988 uint* pst_counter = &SharedRuntime::_partial_subtype_ctr; 4989 ExternalAddress pst_counter_addr((address) pst_counter); 4990 NOT_LP64( incrementl(pst_counter_addr) ); 4991 LP64_ONLY( lea(rcx, pst_counter_addr) ); 4992 LP64_ONLY( incrementl(Address(rcx, 0)) ); 4993 #endif //PRODUCT 4994 4995 // We will consult the secondary-super array. 4996 movptr(rdi, secondary_supers_addr); 4997 // Load the array length. (Positive movl does right thing on LP64.) 4998 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes())); 4999 // Skip to start of data. 5000 addptr(rdi, Array<Klass*>::base_offset_in_bytes()); 5001 5002 // Scan RCX words at [RDI] for an occurrence of RAX. 5003 // Set NZ/Z based on last compare. 5004 // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does 5005 // not change flags (only scas instruction which is repeated sets flags). 5006 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found. 5007 5008 testptr(rax,rax); // Set Z = 0 5009 repne_scan(); 5010 5011 // Unspill the temp. registers: 5012 if (pushed_rdi) pop(rdi); 5013 if (pushed_rcx) pop(rcx); 5014 if (pushed_rax) pop(rax); 5015 5016 if (set_cond_codes) { 5017 // Special hack for the AD files: rdi is guaranteed non-zero. 
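// (After repne_scan, rdi still points into, or just past, the secondary-supers
// array, so it is never null here.)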
5018 assert(!pushed_rdi, "rdi must be left non-null"); 5019 // Also, the condition codes are properly set Z/NZ on succeed/failure. 5020 } 5021 5022 if (L_failure == &L_fallthrough) 5023 jccb(Assembler::notEqual, *L_failure); 5024 else jcc(Assembler::notEqual, *L_failure); 5025 5026 // Success. Cache the super we found and proceed in triumph. 5027 movptr(super_cache_addr, super_klass); 5028 5029 if (L_success != &L_fallthrough) { 5030 jmp(*L_success); 5031 } 5032 5033 #undef IS_A_TEMP 5034 5035 bind(L_fallthrough); 5036 } 5037 5038 void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) { 5039 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 5040 5041 Label L_fallthrough; 5042 if (L_fast_path == nullptr) { 5043 L_fast_path = &L_fallthrough; 5044 } else if (L_slow_path == nullptr) { 5045 L_slow_path = &L_fallthrough; 5046 } 5047 5048 // Fast path check: class is fully initialized 5049 cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); 5050 jcc(Assembler::equal, *L_fast_path); 5051 5052 // Fast path check: current thread is initializer thread 5053 cmpptr(thread, Address(klass, InstanceKlass::init_thread_offset())); 5054 if (L_slow_path == &L_fallthrough) { 5055 jcc(Assembler::equal, *L_fast_path); 5056 bind(*L_slow_path); 5057 } else if (L_fast_path == &L_fallthrough) { 5058 jcc(Assembler::notEqual, *L_slow_path); 5059 bind(*L_fast_path); 5060 } else { 5061 Unimplemented(); 5062 } 5063 } 5064 5065 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) { 5066 if (VM_Version::supports_cmov()) { 5067 cmovl(cc, dst, src); 5068 } else { 5069 Label L; 5070 jccb(negate_condition(cc), L); 5071 movl(dst, src); 5072 bind(L); 5073 } 5074 } 5075 5076 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) { 5077 if (VM_Version::supports_cmov()) { 5078 cmovl(cc, dst, src); 5079 } else { 5080 Label L; 5081 jccb(negate_condition(cc), L); 5082 movl(dst, src); 5083 bind(L); 5084 } 5085 } 5086 5087 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 5088 if (!VerifyOops || VerifyAdapterSharing) { 5089 // Below address of the code string confuses VerifyAdapterSharing 5090 // because it may differ between otherwise equivalent adapters. 
    return;
  }

  BLOCK_COMMENT("verify_oop {");
#ifdef _LP64
  push(rscratch1);
#endif
  push(rax);  // save rax
  push(reg);  // pass register argument

  // Pass register number to verify_oop_subroutine
  const char* b = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
    b = code_string(ss.as_string());
  }
  ExternalAddress buffer((address) b);
  pushptr(buffer.addr(), rscratch1);

  // call indirectly to solve generation ordering problem
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
  // Caller pops the arguments (oop, message) and restores rax, r10
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::vallones(XMMRegister dst, int vector_len) {
  if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) {
    // Only pcmpeq gets dependency-breaking treatment (i.e. execution can begin
    // without waiting for the previous result in dst); vpcmpeqd does not, so
    // just use vpternlog.
    vpternlogd(dst, 0xFF, dst, dst, vector_len);
  } else if (VM_Version::supports_avx()) {
    vpcmpeqd(dst, dst, dst, vector_len);
  } else {
    assert(VM_Version::supports_sse2(), "");
    pcmpeqd(dst, dst);
  }
}

Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
  int stackElementSize = Interpreter::stackElementSize;
  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
#ifdef ASSERT
  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  Register             scale_reg    = noreg;
  Address::ScaleFactor scale_factor = Address::no_scale;
  if (arg_slot.is_constant()) {
    offset += arg_slot.as_constant() * stackElementSize;
  } else {
    scale_reg    = arg_slot.as_register();
    scale_factor = Address::times(stackElementSize);
  }
  offset += wordSize;  // return PC is on stack
  return Address(rsp, scale_reg, scale_factor, offset);
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
  if (!VerifyOops || VerifyAdapterSharing) {
    // The address of the code string (pushed below) confuses VerifyAdapterSharing
    // because it may differ between otherwise equivalent adapters.
    return;
  }

#ifdef _LP64
  push(rscratch1);
#endif
  push(rax);  // save rax
  // addr may contain rsp so we will have to adjust it based on the push
  // we just did (and on 64 bit we do two pushes).
  // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
  // stores rax into addr, which is backwards of what was intended.
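  // The pushes above moved rsp down (two words on LP64, one on 32-bit), so
  // an rsp-relative 'addr' is now stale; the LP64_ONLY(2 *) BytesPerWord
  // displacement below rebases it onto the caller's intended slot.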
5168 if (addr.uses(rsp)) { 5169 lea(rax, addr); 5170 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord)); 5171 } else { 5172 pushptr(addr); 5173 } 5174 5175 // Pass register number to verify_oop_subroutine 5176 const char* b = nullptr; 5177 { 5178 ResourceMark rm; 5179 stringStream ss; 5180 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 5181 b = code_string(ss.as_string()); 5182 } 5183 ExternalAddress buffer((address) b); 5184 pushptr(buffer.addr(), rscratch1); 5185 5186 // call indirectly to solve generation ordering problem 5187 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 5188 call(rax); 5189 // Caller pops the arguments (addr, message) and restores rax, r10. 5190 } 5191 5192 void MacroAssembler::verify_tlab() { 5193 #ifdef ASSERT 5194 if (UseTLAB && VerifyOops) { 5195 Label next, ok; 5196 Register t1 = rsi; 5197 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread); 5198 5199 push(t1); 5200 NOT_LP64(push(thread_reg)); 5201 NOT_LP64(get_thread(thread_reg)); 5202 5203 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 5204 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); 5205 jcc(Assembler::aboveEqual, next); 5206 STOP("assert(top >= start)"); 5207 should_not_reach_here(); 5208 5209 bind(next); 5210 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); 5211 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 5212 jcc(Assembler::aboveEqual, ok); 5213 STOP("assert(top <= end)"); 5214 should_not_reach_here(); 5215 5216 bind(ok); 5217 NOT_LP64(pop(thread_reg)); 5218 pop(t1); 5219 } 5220 #endif 5221 } 5222 5223 class ControlWord { 5224 public: 5225 int32_t _value; 5226 5227 int rounding_control() const { return (_value >> 10) & 3 ; } 5228 int precision_control() const { return (_value >> 8) & 3 ; } 5229 bool precision() const { return ((_value >> 5) & 1) != 0; } 5230 bool underflow() const { return ((_value >> 4) & 1) != 0; } 5231 bool overflow() const { return ((_value >> 3) & 1) != 0; } 5232 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 5233 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 5234 bool invalid() const { return ((_value >> 0) & 1) != 0; } 5235 5236 void print() const { 5237 // rounding control 5238 const char* rc; 5239 switch (rounding_control()) { 5240 case 0: rc = "round near"; break; 5241 case 1: rc = "round down"; break; 5242 case 2: rc = "round up "; break; 5243 case 3: rc = "chop "; break; 5244 default: 5245 rc = nullptr; // silence compiler warnings 5246 fatal("Unknown rounding control: %d", rounding_control()); 5247 }; 5248 // precision control 5249 const char* pc; 5250 switch (precision_control()) { 5251 case 0: pc = "24 bits "; break; 5252 case 1: pc = "reserved"; break; 5253 case 2: pc = "53 bits "; break; 5254 case 3: pc = "64 bits "; break; 5255 default: 5256 pc = nullptr; // silence compiler warnings 5257 fatal("Unknown precision control: %d", precision_control()); 5258 }; 5259 // flags 5260 char f[9]; 5261 f[0] = ' '; 5262 f[1] = ' '; 5263 f[2] = (precision ()) ? 'P' : 'p'; 5264 f[3] = (underflow ()) ? 'U' : 'u'; 5265 f[4] = (overflow ()) ? 'O' : 'o'; 5266 f[5] = (zero_divide ()) ? 'Z' : 'z'; 5267 f[6] = (denormalized()) ? 'D' : 'd'; 5268 f[7] = (invalid ()) ? 
'I' : 'i'; 5269 f[8] = '\x0'; 5270 // output 5271 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc); 5272 } 5273 5274 }; 5275 5276 class StatusWord { 5277 public: 5278 int32_t _value; 5279 5280 bool busy() const { return ((_value >> 15) & 1) != 0; } 5281 bool C3() const { return ((_value >> 14) & 1) != 0; } 5282 bool C2() const { return ((_value >> 10) & 1) != 0; } 5283 bool C1() const { return ((_value >> 9) & 1) != 0; } 5284 bool C0() const { return ((_value >> 8) & 1) != 0; } 5285 int top() const { return (_value >> 11) & 7 ; } 5286 bool error_status() const { return ((_value >> 7) & 1) != 0; } 5287 bool stack_fault() const { return ((_value >> 6) & 1) != 0; } 5288 bool precision() const { return ((_value >> 5) & 1) != 0; } 5289 bool underflow() const { return ((_value >> 4) & 1) != 0; } 5290 bool overflow() const { return ((_value >> 3) & 1) != 0; } 5291 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 5292 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 5293 bool invalid() const { return ((_value >> 0) & 1) != 0; } 5294 5295 void print() const { 5296 // condition codes 5297 char c[5]; 5298 c[0] = (C3()) ? '3' : '-'; 5299 c[1] = (C2()) ? '2' : '-'; 5300 c[2] = (C1()) ? '1' : '-'; 5301 c[3] = (C0()) ? '0' : '-'; 5302 c[4] = '\x0'; 5303 // flags 5304 char f[9]; 5305 f[0] = (error_status()) ? 'E' : '-'; 5306 f[1] = (stack_fault ()) ? 'S' : '-'; 5307 f[2] = (precision ()) ? 'P' : '-'; 5308 f[3] = (underflow ()) ? 'U' : '-'; 5309 f[4] = (overflow ()) ? 'O' : '-'; 5310 f[5] = (zero_divide ()) ? 'Z' : '-'; 5311 f[6] = (denormalized()) ? 'D' : '-'; 5312 f[7] = (invalid ()) ? 'I' : '-'; 5313 f[8] = '\x0'; 5314 // output 5315 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top()); 5316 } 5317 5318 }; 5319 5320 class TagWord { 5321 public: 5322 int32_t _value; 5323 5324 int tag_at(int i) const { return (_value >> (i*2)) & 3; } 5325 5326 void print() const { 5327 printf("%04x", _value & 0xFFFF); 5328 } 5329 5330 }; 5331 5332 class FPU_Register { 5333 public: 5334 int32_t _m0; 5335 int32_t _m1; 5336 int16_t _ex; 5337 5338 bool is_indefinite() const { 5339 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0; 5340 } 5341 5342 void print() const { 5343 char sign = (_ex < 0) ? '-' : '+'; 5344 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " "; 5345 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind); 5346 }; 5347 5348 }; 5349 5350 class FPU_State { 5351 public: 5352 enum { 5353 register_size = 10, 5354 number_of_registers = 8, 5355 register_mask = 7 5356 }; 5357 5358 ControlWord _control_word; 5359 StatusWord _status_word; 5360 TagWord _tag_word; 5361 int32_t _error_offset; 5362 int32_t _error_selector; 5363 int32_t _data_offset; 5364 int32_t _data_selector; 5365 int8_t _register[register_size * number_of_registers]; 5366 5367 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); } 5368 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; } 5369 5370 const char* tag_as_string(int tag) const { 5371 switch (tag) { 5372 case 0: return "valid"; 5373 case 1: return "zero"; 5374 case 2: return "special"; 5375 case 3: return "empty"; 5376 } 5377 ShouldNotReachHere(); 5378 return nullptr; 5379 } 5380 5381 void print() const { 5382 // print computation registers 5383 { int t = _status_word.top(); 5384 for (int i = 0; i < number_of_registers; i++) { 5385 int j = (i - t) & register_mask; 5386 printf("%c r%d = ST%d = ", (j == 0 ? 
'*' : ' '), i, j); 5387 st(j)->print(); 5388 printf(" %s\n", tag_as_string(_tag_word.tag_at(i))); 5389 } 5390 } 5391 printf("\n"); 5392 // print control registers 5393 printf("ctrl = "); _control_word.print(); printf("\n"); 5394 printf("stat = "); _status_word .print(); printf("\n"); 5395 printf("tags = "); _tag_word .print(); printf("\n"); 5396 } 5397 5398 }; 5399 5400 class Flag_Register { 5401 public: 5402 int32_t _value; 5403 5404 bool overflow() const { return ((_value >> 11) & 1) != 0; } 5405 bool direction() const { return ((_value >> 10) & 1) != 0; } 5406 bool sign() const { return ((_value >> 7) & 1) != 0; } 5407 bool zero() const { return ((_value >> 6) & 1) != 0; } 5408 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; } 5409 bool parity() const { return ((_value >> 2) & 1) != 0; } 5410 bool carry() const { return ((_value >> 0) & 1) != 0; } 5411 5412 void print() const { 5413 // flags 5414 char f[8]; 5415 f[0] = (overflow ()) ? 'O' : '-'; 5416 f[1] = (direction ()) ? 'D' : '-'; 5417 f[2] = (sign ()) ? 'S' : '-'; 5418 f[3] = (zero ()) ? 'Z' : '-'; 5419 f[4] = (auxiliary_carry()) ? 'A' : '-'; 5420 f[5] = (parity ()) ? 'P' : '-'; 5421 f[6] = (carry ()) ? 'C' : '-'; 5422 f[7] = '\x0'; 5423 // output 5424 printf("%08x flags = %s", _value, f); 5425 } 5426 5427 }; 5428 5429 class IU_Register { 5430 public: 5431 int32_t _value; 5432 5433 void print() const { 5434 printf("%08x %11d", _value, _value); 5435 } 5436 5437 }; 5438 5439 class IU_State { 5440 public: 5441 Flag_Register _eflags; 5442 IU_Register _rdi; 5443 IU_Register _rsi; 5444 IU_Register _rbp; 5445 IU_Register _rsp; 5446 IU_Register _rbx; 5447 IU_Register _rdx; 5448 IU_Register _rcx; 5449 IU_Register _rax; 5450 5451 void print() const { 5452 // computation registers 5453 printf("rax, = "); _rax.print(); printf("\n"); 5454 printf("rbx, = "); _rbx.print(); printf("\n"); 5455 printf("rcx = "); _rcx.print(); printf("\n"); 5456 printf("rdx = "); _rdx.print(); printf("\n"); 5457 printf("rdi = "); _rdi.print(); printf("\n"); 5458 printf("rsi = "); _rsi.print(); printf("\n"); 5459 printf("rbp, = "); _rbp.print(); printf("\n"); 5460 printf("rsp = "); _rsp.print(); printf("\n"); 5461 printf("\n"); 5462 // control registers 5463 printf("flgs = "); _eflags.print(); printf("\n"); 5464 } 5465 }; 5466 5467 5468 class CPU_State { 5469 public: 5470 FPU_State _fpu_state; 5471 IU_State _iu_state; 5472 5473 void print() const { 5474 printf("--------------------------------------------------\n"); 5475 _iu_state .print(); 5476 printf("\n"); 5477 _fpu_state.print(); 5478 printf("--------------------------------------------------\n"); 5479 } 5480 5481 }; 5482 5483 5484 static void _print_CPU_state(CPU_State* state) { 5485 state->print(); 5486 }; 5487 5488 5489 void MacroAssembler::print_CPU_state() { 5490 push_CPU_state(); 5491 push(rsp); // pass CPU state 5492 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state))); 5493 addptr(rsp, wordSize); // discard argument 5494 pop_CPU_state(); 5495 } 5496 5497 5498 #ifndef _LP64 5499 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) { 5500 static int counter = 0; 5501 FPU_State* fs = &state->_fpu_state; 5502 counter++; 5503 // For leaf calls, only verify that the top few elements remain empty. 5504 // We only need 1 empty at the top for C2 code. 
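  // x87 tag values (cf. tag_as_string above): 0 = valid, 1 = zero,
  // 2 = special, 3 = empty. So tag_for_st(i) == 3 means ST(i) is empty,
  // which is what the leaf-call check below relies on.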
5505 if( stack_depth < 0 ) { 5506 if( fs->tag_for_st(7) != 3 ) { 5507 printf("FPR7 not empty\n"); 5508 state->print(); 5509 assert(false, "error"); 5510 return false; 5511 } 5512 return true; // All other stack states do not matter 5513 } 5514 5515 assert((fs->_control_word._value & 0xffff) == StubRoutines::x86::fpu_cntrl_wrd_std(), 5516 "bad FPU control word"); 5517 5518 // compute stack depth 5519 int i = 0; 5520 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++; 5521 int d = i; 5522 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++; 5523 // verify findings 5524 if (i != FPU_State::number_of_registers) { 5525 // stack not contiguous 5526 printf("%s: stack not contiguous at ST%d\n", s, i); 5527 state->print(); 5528 assert(false, "error"); 5529 return false; 5530 } 5531 // check if computed stack depth corresponds to expected stack depth 5532 if (stack_depth < 0) { 5533 // expected stack depth is -stack_depth or less 5534 if (d > -stack_depth) { 5535 // too many elements on the stack 5536 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d); 5537 state->print(); 5538 assert(false, "error"); 5539 return false; 5540 } 5541 } else { 5542 // expected stack depth is stack_depth 5543 if (d != stack_depth) { 5544 // wrong stack depth 5545 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d); 5546 state->print(); 5547 assert(false, "error"); 5548 return false; 5549 } 5550 } 5551 // everything is cool 5552 return true; 5553 } 5554 5555 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 5556 if (!VerifyFPU) return; 5557 push_CPU_state(); 5558 push(rsp); // pass CPU state 5559 ExternalAddress msg((address) s); 5560 // pass message string s 5561 pushptr(msg.addr(), noreg); 5562 push(stack_depth); // pass stack depth 5563 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU))); 5564 addptr(rsp, 3 * wordSize); // discard arguments 5565 // check for error 5566 { Label L; 5567 testl(rax, rax); 5568 jcc(Assembler::notZero, L); 5569 int3(); // break if error condition 5570 bind(L); 5571 } 5572 pop_CPU_state(); 5573 } 5574 #endif // _LP64 5575 5576 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) { 5577 // Either restore the MXCSR register after returning from the JNI Call 5578 // or verify that it wasn't changed (with -Xcheck:jni flag). 5579 if (VM_Version::supports_sse()) { 5580 if (RestoreMXCSROnJNICalls) { 5581 ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch); 5582 } else if (CheckJNICalls) { 5583 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry())); 5584 } 5585 } 5586 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty. 5587 vzeroupper(); 5588 5589 #ifndef _LP64 5590 // Either restore the x87 floating pointer control word after returning 5591 // from the JNI call or verify that it wasn't changed. 5592 if (CheckJNICalls) { 5593 call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry())); 5594 } 5595 #endif // _LP64 5596 } 5597 5598 // ((OopHandle)result).resolve(); 5599 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) { 5600 assert_different_registers(result, tmp); 5601 5602 // Only 64 bit platforms support GCs that require a tmp register 5603 // Only IN_HEAP loads require a thread_tmp register 5604 // OopHandle::resolve is an indirection like jobject. 
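  // Modulo GC barriers, this is a single indirection. A rough C sketch
  // (illustrative only, not the emitted code):
  //
  //   result = *(oop*)result;   // OopHandle is a pointer to an oop slot
  //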
5605 access_load_at(T_OBJECT, IN_NATIVE, 5606 result, Address(result, 0), tmp, /*tmp_thread*/noreg); 5607 } 5608 5609 // ((WeakHandle)result).resolve(); 5610 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) { 5611 assert_different_registers(rresult, rtmp); 5612 Label resolved; 5613 5614 // A null weak handle resolves to null. 5615 cmpptr(rresult, 0); 5616 jcc(Assembler::equal, resolved); 5617 5618 // Only 64 bit platforms support GCs that require a tmp register 5619 // Only IN_HEAP loads require a thread_tmp register 5620 // WeakHandle::resolve is an indirection like jweak. 5621 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 5622 rresult, Address(rresult, 0), rtmp, /*tmp_thread*/noreg); 5623 bind(resolved); 5624 } 5625 5626 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) { 5627 // get mirror 5628 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 5629 load_method_holder(mirror, method); 5630 movptr(mirror, Address(mirror, mirror_offset)); 5631 resolve_oop_handle(mirror, tmp); 5632 } 5633 5634 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 5635 load_method_holder(rresult, rmethod); 5636 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 5637 } 5638 5639 void MacroAssembler::load_method_holder(Register holder, Register method) { 5640 movptr(holder, Address(method, Method::const_offset())); // ConstMethod* 5641 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 5642 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 5643 } 5644 5645 void MacroAssembler::load_metadata(Register dst, Register src) { 5646 if (UseCompressedClassPointers) { 5647 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5648 } else { 5649 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5650 } 5651 } 5652 5653 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { 5654 assert_different_registers(src, tmp); 5655 assert_different_registers(dst, tmp); 5656 #ifdef _LP64 5657 if (UseCompressedClassPointers) { 5658 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5659 decode_klass_not_null(dst, tmp); 5660 } else 5661 #endif 5662 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5663 } 5664 5665 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) { 5666 load_klass(dst, src, tmp); 5667 movptr(dst, Address(dst, Klass::prototype_header_offset())); 5668 } 5669 5670 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { 5671 assert_different_registers(src, tmp); 5672 assert_different_registers(dst, tmp); 5673 #ifdef _LP64 5674 if (UseCompressedClassPointers) { 5675 encode_klass_not_null(src, tmp); 5676 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); 5677 } else 5678 #endif 5679 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src); 5680 } 5681 5682 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, 5683 Register tmp1, Register thread_tmp) { 5684 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5685 decorators = AccessInternal::decorator_fixup(decorators, type); 5686 bool as_raw = (decorators & AS_RAW) != 0; 5687 if (as_raw) { 5688 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp); 5689 } else { 5690 bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp); 5691 } 5692 } 
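
// Counterpart of access_load_at above: decorator_fixup() normalizes the
// decorator set, AS_RAW selects the plain BarrierSetAssembler::store_at,
// and everything else dispatches to the GC-specific virtual store_at.
// For example, store_heap_oop() further down reduces to:
//
//   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);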
5693 5694 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, 5695 Register tmp1, Register tmp2, Register tmp3) { 5696 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5697 decorators = AccessInternal::decorator_fixup(decorators, type); 5698 bool as_raw = (decorators & AS_RAW) != 0; 5699 if (as_raw) { 5700 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5701 } else { 5702 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5703 } 5704 } 5705 5706 void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst, 5707 Register inline_klass) { 5708 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5709 bs->value_copy(this, decorators, src, dst, inline_klass); 5710 } 5711 5712 void MacroAssembler::first_field_offset(Register inline_klass, Register offset) { 5713 movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset())); 5714 movl(offset, Address(offset, InlineKlass::first_field_offset_offset())); 5715 } 5716 5717 void MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) { 5718 // ((address) (void*) o) + vk->first_field_offset(); 5719 Register offset = (data == oop) ? rscratch1 : data; 5720 first_field_offset(inline_klass, offset); 5721 if (data == oop) { 5722 addptr(data, offset); 5723 } else { 5724 lea(data, Address(oop, offset)); 5725 } 5726 } 5727 5728 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass, 5729 Register index, Register data) { 5730 assert(index != rcx, "index needs to shift by rcx"); 5731 assert_different_registers(array, array_klass, index); 5732 assert_different_registers(rcx, array, index); 5733 5734 // array->base() + (index << Klass::layout_helper_log2_element_size(lh)); 5735 movl(rcx, Address(array_klass, Klass::layout_helper_offset())); 5736 5737 // Klass::layout_helper_log2_element_size(lh) 5738 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask; 5739 shrl(rcx, Klass::_lh_log2_element_size_shift); 5740 andl(rcx, Klass::_lh_log2_element_size_mask); 5741 shlptr(index); // index << rcx 5742 5743 lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT))); 5744 } 5745 5746 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 5747 Register thread_tmp, DecoratorSet decorators) { 5748 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp); 5749 } 5750 5751 // Doesn't do verification, generates fixed size code 5752 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 5753 Register thread_tmp, DecoratorSet decorators) { 5754 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp); 5755 } 5756 5757 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5758 Register tmp2, Register tmp3, DecoratorSet decorators) { 5759 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5760 } 5761 5762 // Used for storing nulls. 
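// Passing noreg as the value register below makes the barrier-set code
// store a null (zero) directly, so no register has to be reserved for it;
// roughly equivalent to store_heap_oop(dst, noreg, noreg, noreg, noreg).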
5763 void MacroAssembler::store_heap_oop_null(Address dst) { 5764 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5765 } 5766 5767 #ifdef _LP64 5768 void MacroAssembler::store_klass_gap(Register dst, Register src) { 5769 if (UseCompressedClassPointers) { 5770 // Store to klass gap in destination 5771 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); 5772 } 5773 } 5774 5775 #ifdef ASSERT 5776 void MacroAssembler::verify_heapbase(const char* msg) { 5777 assert (UseCompressedOops, "should be compressed"); 5778 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5779 if (CheckCompressedOops) { 5780 Label ok; 5781 ExternalAddress src2(CompressedOops::ptrs_base_addr()); 5782 const bool is_src2_reachable = reachable(src2); 5783 if (!is_src2_reachable) { 5784 push(rscratch1); // cmpptr trashes rscratch1 5785 } 5786 cmpptr(r12_heapbase, src2, rscratch1); 5787 jcc(Assembler::equal, ok); 5788 STOP(msg); 5789 bind(ok); 5790 if (!is_src2_reachable) { 5791 pop(rscratch1); 5792 } 5793 } 5794 } 5795 #endif 5796 5797 // Algorithm must match oop.inline.hpp encode_heap_oop. 5798 void MacroAssembler::encode_heap_oop(Register r) { 5799 #ifdef ASSERT 5800 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 5801 #endif 5802 verify_oop_msg(r, "broken oop in encode_heap_oop"); 5803 if (CompressedOops::base() == nullptr) { 5804 if (CompressedOops::shift() != 0) { 5805 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5806 shrq(r, LogMinObjAlignmentInBytes); 5807 } 5808 return; 5809 } 5810 testq(r, r); 5811 cmovq(Assembler::equal, r, r12_heapbase); 5812 subq(r, r12_heapbase); 5813 shrq(r, LogMinObjAlignmentInBytes); 5814 } 5815 5816 void MacroAssembler::encode_heap_oop_not_null(Register r) { 5817 #ifdef ASSERT 5818 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 5819 if (CheckCompressedOops) { 5820 Label ok; 5821 testq(r, r); 5822 jcc(Assembler::notEqual, ok); 5823 STOP("null oop passed to encode_heap_oop_not_null"); 5824 bind(ok); 5825 } 5826 #endif 5827 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 5828 if (CompressedOops::base() != nullptr) { 5829 subq(r, r12_heapbase); 5830 } 5831 if (CompressedOops::shift() != 0) { 5832 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5833 shrq(r, LogMinObjAlignmentInBytes); 5834 } 5835 } 5836 5837 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 5838 #ifdef ASSERT 5839 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 5840 if (CheckCompressedOops) { 5841 Label ok; 5842 testq(src, src); 5843 jcc(Assembler::notEqual, ok); 5844 STOP("null oop passed to encode_heap_oop_not_null2"); 5845 bind(ok); 5846 } 5847 #endif 5848 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 5849 if (dst != src) { 5850 movq(dst, src); 5851 } 5852 if (CompressedOops::base() != nullptr) { 5853 subq(dst, r12_heapbase); 5854 } 5855 if (CompressedOops::shift() != 0) { 5856 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5857 shrq(dst, LogMinObjAlignmentInBytes); 5858 } 5859 } 5860 5861 void MacroAssembler::decode_heap_oop(Register r) { 5862 #ifdef ASSERT 5863 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 5864 #endif 5865 if (CompressedOops::base() == nullptr) { 5866 if (CompressedOops::shift() != 0) { 5867 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg 
wrong"); 5868 shlq(r, LogMinObjAlignmentInBytes); 5869 } 5870 } else { 5871 Label done; 5872 shlq(r, LogMinObjAlignmentInBytes); 5873 jccb(Assembler::equal, done); 5874 addq(r, r12_heapbase); 5875 bind(done); 5876 } 5877 verify_oop_msg(r, "broken oop in decode_heap_oop"); 5878 } 5879 5880 void MacroAssembler::decode_heap_oop_not_null(Register r) { 5881 // Note: it will change flags 5882 assert (UseCompressedOops, "should only be used for compressed headers"); 5883 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5884 // Cannot assert, unverified entry point counts instructions (see .ad file) 5885 // vtableStubs also counts instructions in pd_code_size_limit. 5886 // Also do not verify_oop as this is called by verify_oop. 5887 if (CompressedOops::shift() != 0) { 5888 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5889 shlq(r, LogMinObjAlignmentInBytes); 5890 if (CompressedOops::base() != nullptr) { 5891 addq(r, r12_heapbase); 5892 } 5893 } else { 5894 assert (CompressedOops::base() == nullptr, "sanity"); 5895 } 5896 } 5897 5898 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 5899 // Note: it will change flags 5900 assert (UseCompressedOops, "should only be used for compressed headers"); 5901 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5902 // Cannot assert, unverified entry point counts instructions (see .ad file) 5903 // vtableStubs also counts instructions in pd_code_size_limit. 5904 // Also do not verify_oop as this is called by verify_oop. 5905 if (CompressedOops::shift() != 0) { 5906 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5907 if (LogMinObjAlignmentInBytes == Address::times_8) { 5908 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); 5909 } else { 5910 if (dst != src) { 5911 movq(dst, src); 5912 } 5913 shlq(dst, LogMinObjAlignmentInBytes); 5914 if (CompressedOops::base() != nullptr) { 5915 addq(dst, r12_heapbase); 5916 } 5917 } 5918 } else { 5919 assert (CompressedOops::base() == nullptr, "sanity"); 5920 if (dst != src) { 5921 movq(dst, src); 5922 } 5923 } 5924 } 5925 5926 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { 5927 assert_different_registers(r, tmp); 5928 if (CompressedKlassPointers::base() != nullptr) { 5929 mov64(tmp, (int64_t)CompressedKlassPointers::base()); 5930 subq(r, tmp); 5931 } 5932 if (CompressedKlassPointers::shift() != 0) { 5933 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5934 shrq(r, LogKlassAlignmentInBytes); 5935 } 5936 } 5937 5938 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) { 5939 assert_different_registers(src, dst); 5940 if (CompressedKlassPointers::base() != nullptr) { 5941 mov64(dst, -(int64_t)CompressedKlassPointers::base()); 5942 addq(dst, src); 5943 } else { 5944 movptr(dst, src); 5945 } 5946 if (CompressedKlassPointers::shift() != 0) { 5947 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5948 shrq(dst, LogKlassAlignmentInBytes); 5949 } 5950 } 5951 5952 void MacroAssembler::decode_klass_not_null(Register r, Register tmp) { 5953 assert_different_registers(r, tmp); 5954 // Note: it will change flags 5955 assert(UseCompressedClassPointers, "should only be used for compressed headers"); 5956 // Cannot assert, unverified entry point counts instructions (see .ad file) 5957 // vtableStubs also counts instructions in pd_code_size_limit. 
5958 // Also do not verify_oop as this is called by verify_oop. 5959 if (CompressedKlassPointers::shift() != 0) { 5960 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5961 shlq(r, LogKlassAlignmentInBytes); 5962 } 5963 if (CompressedKlassPointers::base() != nullptr) { 5964 mov64(tmp, (int64_t)CompressedKlassPointers::base()); 5965 addq(r, tmp); 5966 } 5967 } 5968 5969 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) { 5970 assert_different_registers(src, dst); 5971 // Note: it will change flags 5972 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5973 // Cannot assert, unverified entry point counts instructions (see .ad file) 5974 // vtableStubs also counts instructions in pd_code_size_limit. 5975 // Also do not verify_oop as this is called by verify_oop. 5976 5977 if (CompressedKlassPointers::base() == nullptr && 5978 CompressedKlassPointers::shift() == 0) { 5979 // The best case scenario is that there is no base or shift. Then it is already 5980 // a pointer that needs nothing but a register rename. 5981 movl(dst, src); 5982 } else { 5983 if (CompressedKlassPointers::base() != nullptr) { 5984 mov64(dst, (int64_t)CompressedKlassPointers::base()); 5985 } else { 5986 xorq(dst, dst); 5987 } 5988 if (CompressedKlassPointers::shift() != 0) { 5989 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5990 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); 5991 leaq(dst, Address(dst, src, Address::times_8, 0)); 5992 } else { 5993 addq(dst, src); 5994 } 5995 } 5996 } 5997 5998 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5999 assert (UseCompressedOops, "should only be used for compressed headers"); 6000 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6001 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6002 int oop_index = oop_recorder()->find_index(obj); 6003 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6004 mov_narrow_oop(dst, oop_index, rspec); 6005 } 6006 6007 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) { 6008 assert (UseCompressedOops, "should only be used for compressed headers"); 6009 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6010 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6011 int oop_index = oop_recorder()->find_index(obj); 6012 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6013 mov_narrow_oop(dst, oop_index, rspec); 6014 } 6015 6016 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 6017 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6018 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6019 int klass_index = oop_recorder()->find_index(k); 6020 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6021 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6022 } 6023 6024 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) { 6025 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6026 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6027 int klass_index = oop_recorder()->find_index(k); 6028 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6029 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6030 } 6031 6032 void 
MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) { 6033 assert (UseCompressedOops, "should only be used for compressed headers"); 6034 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6035 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6036 int oop_index = oop_recorder()->find_index(obj); 6037 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6038 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 6039 } 6040 6041 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) { 6042 assert (UseCompressedOops, "should only be used for compressed headers"); 6043 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6044 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6045 int oop_index = oop_recorder()->find_index(obj); 6046 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6047 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 6048 } 6049 6050 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) { 6051 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6052 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6053 int klass_index = oop_recorder()->find_index(k); 6054 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6055 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6056 } 6057 6058 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { 6059 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6060 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6061 int klass_index = oop_recorder()->find_index(k); 6062 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6063 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6064 } 6065 6066 void MacroAssembler::reinit_heapbase() { 6067 if (UseCompressedOops) { 6068 if (Universe::heap() != nullptr) { 6069 if (CompressedOops::base() == nullptr) { 6070 MacroAssembler::xorptr(r12_heapbase, r12_heapbase); 6071 } else { 6072 mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base()); 6073 } 6074 } else { 6075 movptr(r12_heapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 6076 } 6077 } 6078 } 6079 6080 #endif // _LP64 6081 6082 #if COMPILER2_OR_JVMCI 6083 6084 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers 6085 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) { 6086 // cnt - number of qwords (8-byte words). 6087 // base - start address, qword aligned. 
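  // Overall shape, as a rough C sketch (illustrative only):
  //
  //   while (cnt >= 8)  { store 64 bytes of xtmp; base += 64; cnt -= 8; }
  //   if (cnt >= 4)     { store 32 bytes;         base += 32; cnt -= 4; }
  //   while (cnt-- > 0) { store 8 bytes;          base += 8;            }
  //
  // With 64-byte vectors available, the entire sub-64-byte tail is instead
  // handled by a single masked fill64_masked() store.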
  Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
  bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
  if (use64byteVector) {
    evpbroadcastq(xtmp, val, AVX_512bit);
  } else if (MaxVectorSize >= 32) {
    movdq(xtmp, val);
    punpcklqdq(xtmp, xtmp);
    vinserti128_high(xtmp, xtmp);
  } else {
    movdq(xtmp, val);
    punpcklqdq(xtmp, xtmp);
  }
  jmp(L_zero_64_bytes);

  BIND(L_loop);
  if (MaxVectorSize >= 32) {
    fill64(base, 0, xtmp, use64byteVector);
  } else {
    movdqu(Address(base,  0), xtmp);
    movdqu(Address(base, 16), xtmp);
    movdqu(Address(base, 32), xtmp);
    movdqu(Address(base, 48), xtmp);
  }
  addptr(base, 64);

  BIND(L_zero_64_bytes);
  subptr(cnt, 8);
  jccb(Assembler::greaterEqual, L_loop);

  // Copy trailing 64 bytes
  if (use64byteVector) {
    addptr(cnt, 8);
    jccb(Assembler::equal, L_end);
    fill64_masked(3, base, 0, xtmp, mask, cnt, val, true);
    jmp(L_end);
  } else {
    addptr(cnt, 4);
    jccb(Assembler::less, L_tail);
    if (MaxVectorSize >= 32) {
      vmovdqu(Address(base, 0), xtmp);
    } else {
      movdqu(Address(base,  0), xtmp);
      movdqu(Address(base, 16), xtmp);
    }
  }
  addptr(base, 32);
  subptr(cnt, 4);

  BIND(L_tail);
  addptr(cnt, 4);
  jccb(Assembler::lessEqual, L_end);
  if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) {
    fill32_masked(3, base, 0, xtmp, mask, cnt, val);
  } else {
    decrement(cnt);

    BIND(L_sloop);
    movq(Address(base, 0), xtmp);
    addptr(base, 8);
    decrement(cnt);
    jccb(Assembler::greaterEqual, L_sloop);
  }
  BIND(L_end);
}

int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
  assert(InlineTypeReturnedAsFields, "should only be used when inline types are returned as fields");
  // An inline type might be returned. If fields are in registers we
  // need to allocate an inline type instance and initialize it with
  // the value of the fields.
  Label skip;
  // We only need a new buffered inline type if one was not returned already:
  // bit 0 of rax is set when the fields come back in registers.
  testptr(rax, 1);
  jcc(Assembler::zero, skip);
  int call_offset = -1;

#ifdef _LP64
  // The following code is similar to allocate_instance but has some slight differences,
  // e.g. object size is never zero, sometimes it's constant; storing the klass ptr after
  // allocating is not necessary if vk != nullptr, etc. allocate_instance is not aware of these.
  Label slow_case;
  // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
  mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it if allocation fails
  if (vk != nullptr) {
    // Called from C1, where the return type is statically known.
    movptr(rbx, (intptr_t)vk->get_InlineKlass());
    jint obj_size = vk->layout_helper();
    assert(obj_size != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
    if (UseTLAB) {
      tlab_allocate(r15_thread, rax, noreg, obj_size, r13, r14, slow_case);
    } else {
      jmp(slow_case);
    }
  } else {
    // Call from interpreter.
RAX contains ((the InlineKlass* of the return type) | 0x01) 6183 mov(rbx, rax); 6184 andptr(rbx, -2); 6185 movl(r14, Address(rbx, Klass::layout_helper_offset())); 6186 if (UseTLAB) { 6187 tlab_allocate(r15_thread, rax, r14, 0, r13, r14, slow_case); 6188 } else { 6189 jmp(slow_case); 6190 } 6191 } 6192 if (UseTLAB) { 6193 // 2. Initialize buffered inline instance header 6194 Register buffer_obj = rax; 6195 movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value()); 6196 xorl(r13, r13); 6197 store_klass_gap(buffer_obj, r13); 6198 if (vk == nullptr) { 6199 // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only). 6200 mov(r13, rbx); 6201 } 6202 store_klass(buffer_obj, rbx, rscratch1); 6203 // 3. Initialize its fields with an inline class specific handler 6204 if (vk != nullptr) { 6205 call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint. 6206 } else { 6207 movptr(rbx, Address(r13, InstanceKlass::adr_inlineklass_fixed_block_offset())); 6208 movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset())); 6209 call(rbx); 6210 } 6211 jmp(skip); 6212 } 6213 bind(slow_case); 6214 // We failed to allocate a new inline type, fall back to a runtime 6215 // call. Some oop field may be live in some registers but we can't 6216 // tell. That runtime call will take care of preserving them 6217 // across a GC if there's one. 6218 mov(rax, rscratch1); 6219 #endif 6220 6221 if (from_interpreter) { 6222 super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf()); 6223 } else { 6224 call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf())); 6225 call_offset = offset(); 6226 } 6227 6228 bind(skip); 6229 return call_offset; 6230 } 6231 6232 // Move a value between registers/stack slots and update the reg_state 6233 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) { 6234 assert(from->is_valid() && to->is_valid(), "source and destination must be valid"); 6235 if (reg_state[to->value()] == reg_written) { 6236 return true; // Already written 6237 } 6238 if (from != to && bt != T_VOID) { 6239 if (reg_state[to->value()] == reg_readonly) { 6240 return false; // Not yet writable 6241 } 6242 if (from->is_reg()) { 6243 if (to->is_reg()) { 6244 if (from->is_XMMRegister()) { 6245 if (bt == T_DOUBLE) { 6246 movdbl(to->as_XMMRegister(), from->as_XMMRegister()); 6247 } else { 6248 assert(bt == T_FLOAT, "must be float"); 6249 movflt(to->as_XMMRegister(), from->as_XMMRegister()); 6250 } 6251 } else { 6252 movq(to->as_Register(), from->as_Register()); 6253 } 6254 } else { 6255 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6256 Address to_addr = Address(rsp, st_off); 6257 if (from->is_XMMRegister()) { 6258 if (bt == T_DOUBLE) { 6259 movdbl(to_addr, from->as_XMMRegister()); 6260 } else { 6261 assert(bt == T_FLOAT, "must be float"); 6262 movflt(to_addr, from->as_XMMRegister()); 6263 } 6264 } else { 6265 movq(to_addr, from->as_Register()); 6266 } 6267 } 6268 } else { 6269 Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize); 6270 if (to->is_reg()) { 6271 if (to->is_XMMRegister()) { 6272 if (bt == T_DOUBLE) { 6273 movdbl(to->as_XMMRegister(), from_addr); 6274 } else { 6275 assert(bt == T_FLOAT, "must be float"); 6276 movflt(to->as_XMMRegister(), from_addr); 6277 } 6278 } else { 6279 movq(to->as_Register(), from_addr); 6280 } 6281 } else { 6282 int st_off = to->reg2stack() * 
VMRegImpl::stack_slot_size + wordSize; 6283 movq(r13, from_addr); 6284 movq(Address(rsp, st_off), r13); 6285 } 6286 } 6287 } 6288 // Update register states 6289 reg_state[from->value()] = reg_writable; 6290 reg_state[to->value()] = reg_written; 6291 return true; 6292 } 6293 6294 // Calculate the extra stack space required for packing or unpacking inline 6295 // args and adjust the stack pointer 6296 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) { 6297 // Two additional slots to account for return address 6298 int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size; 6299 sp_inc = align_up(sp_inc, StackAlignmentInBytes); 6300 // Save the return address, adjust the stack (make sure it is properly 6301 // 16-byte aligned) and copy the return address to the new top of the stack. 6302 // The stack will be repaired on return (see MacroAssembler::remove_frame). 6303 assert(sp_inc > 0, "sanity"); 6304 pop(r13); 6305 subptr(rsp, sp_inc); 6306 push(r13); 6307 return sp_inc; 6308 } 6309 6310 // Read all fields from an inline type buffer and store the field values in registers/stack slots. 6311 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, 6312 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index, 6313 RegState reg_state[]) { 6314 assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter"); 6315 assert(from->is_valid(), "source must be valid"); 6316 bool progress = false; 6317 #ifdef ASSERT 6318 const int start_offset = offset(); 6319 #endif 6320 6321 Label L_null, L_notNull; 6322 // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for) 6323 Register tmp1 = r10; 6324 Register tmp2 = r13; 6325 Register fromReg = noreg; 6326 ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1); 6327 bool done = true; 6328 bool mark_done = true; 6329 VMReg toReg; 6330 BasicType bt; 6331 // Check if argument requires a null check 6332 bool null_check = false; 6333 VMReg nullCheckReg; 6334 while (stream.next(nullCheckReg, bt)) { 6335 if (sig->at(stream.sig_index())._offset == -1) { 6336 null_check = true; 6337 break; 6338 } 6339 } 6340 stream.reset(sig_index, to_index); 6341 while (stream.next(toReg, bt)) { 6342 assert(toReg->is_valid(), "destination must be valid"); 6343 int idx = (int)toReg->value(); 6344 if (reg_state[idx] == reg_readonly) { 6345 if (idx != from->value()) { 6346 mark_done = false; 6347 } 6348 done = false; 6349 continue; 6350 } else if (reg_state[idx] == reg_written) { 6351 continue; 6352 } 6353 assert(reg_state[idx] == reg_writable, "must be writable"); 6354 reg_state[idx] = reg_written; 6355 progress = true; 6356 6357 if (fromReg == noreg) { 6358 if (from->is_reg()) { 6359 fromReg = from->as_Register(); 6360 } else { 6361 int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6362 movq(tmp1, Address(rsp, st_off)); 6363 fromReg = tmp1; 6364 } 6365 if (null_check) { 6366 // Nullable inline type argument, emit null check 6367 testptr(fromReg, fromReg); 6368 jcc(Assembler::zero, L_null); 6369 } 6370 } 6371 int off = sig->at(stream.sig_index())._offset; 6372 if (off == -1) { 6373 assert(null_check, "Missing null check at"); 6374 if (toReg->is_stack()) { 6375 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6376 movq(Address(rsp, st_off), 1); 6377 } else { 6378 movq(toReg->as_Register(), 1); 6379 } 6380 continue; 6381 } 6382 assert(off > 0, "offset in object should be positive"); 6383 Address fromAddr = 
Address(fromReg, off); 6384 if (!toReg->is_XMMRegister()) { 6385 Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register(); 6386 if (is_reference_type(bt)) { 6387 load_heap_oop(dst, fromAddr); 6388 } else { 6389 bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN); 6390 load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed); 6391 } 6392 if (toReg->is_stack()) { 6393 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6394 movq(Address(rsp, st_off), dst); 6395 } 6396 } else if (bt == T_DOUBLE) { 6397 movdbl(toReg->as_XMMRegister(), fromAddr); 6398 } else { 6399 assert(bt == T_FLOAT, "must be float"); 6400 movflt(toReg->as_XMMRegister(), fromAddr); 6401 } 6402 } 6403 if (progress && null_check) { 6404 if (done) { 6405 jmp(L_notNull); 6406 bind(L_null); 6407 // Set IsInit field to zero to signal that the argument is null. 6408 // Also set all oop fields to zero to make the GC happy. 6409 stream.reset(sig_index, to_index); 6410 while (stream.next(toReg, bt)) { 6411 if (sig->at(stream.sig_index())._offset == -1 || 6412 bt == T_OBJECT || bt == T_ARRAY) { 6413 if (toReg->is_stack()) { 6414 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6415 movq(Address(rsp, st_off), 0); 6416 } else { 6417 xorq(toReg->as_Register(), toReg->as_Register()); 6418 } 6419 } 6420 } 6421 bind(L_notNull); 6422 } else { 6423 bind(L_null); 6424 } 6425 } 6426 6427 sig_index = stream.sig_index(); 6428 to_index = stream.regs_index(); 6429 6430 if (mark_done && reg_state[from->value()] != reg_written) { 6431 // This is okay because no one else will write to that slot 6432 reg_state[from->value()] = reg_writable; 6433 } 6434 from_index--; 6435 assert(progress || (start_offset == offset()), "should not emit code"); 6436 return done; 6437 } 6438 6439 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index, 6440 VMRegPair* from, int from_count, int& from_index, VMReg to, 6441 RegState reg_state[], Register val_array) { 6442 assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter"); 6443 assert(to->is_valid(), "destination must be valid"); 6444 6445 if (reg_state[to->value()] == reg_written) { 6446 skip_unpacked_fields(sig, sig_index, from, from_count, from_index); 6447 return true; // Already written 6448 } 6449 6450 // TODO 8284443 Isn't it an issue if below code uses r14 as tmp when it contains a spilled value? 6451 // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for). 6452 Register val_obj_tmp = r11; 6453 Register from_reg_tmp = r14; 6454 Register tmp1 = r10; 6455 Register tmp2 = r13; 6456 Register tmp3 = rbx; 6457 Register val_obj = to->is_stack() ? 
val_obj_tmp : to->as_Register(); 6458 6459 assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array); 6460 6461 if (reg_state[to->value()] == reg_readonly) { 6462 if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) { 6463 skip_unpacked_fields(sig, sig_index, from, from_count, from_index); 6464 return false; // Not yet writable 6465 } 6466 val_obj = val_obj_tmp; 6467 } 6468 6469 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT); 6470 load_heap_oop(val_obj, Address(val_array, index)); 6471 6472 ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index); 6473 VMReg fromReg; 6474 BasicType bt; 6475 Label L_null; 6476 while (stream.next(fromReg, bt)) { 6477 assert(fromReg->is_valid(), "source must be valid"); 6478 reg_state[fromReg->value()] = reg_writable; 6479 6480 int off = sig->at(stream.sig_index())._offset; 6481 if (off == -1) { 6482 // Nullable inline type argument, emit null check 6483 Label L_notNull; 6484 if (fromReg->is_stack()) { 6485 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6486 testb(Address(rsp, ld_off), 1); 6487 } else { 6488 testb(fromReg->as_Register(), 1); 6489 } 6490 jcc(Assembler::notZero, L_notNull); 6491 movptr(val_obj, 0); 6492 jmp(L_null); 6493 bind(L_notNull); 6494 continue; 6495 } 6496 6497 assert(off > 0, "offset in object should be positive"); 6498 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize; 6499 6500 Address dst(val_obj, off); 6501 if (!fromReg->is_XMMRegister()) { 6502 Register src; 6503 if (fromReg->is_stack()) { 6504 src = from_reg_tmp; 6505 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6506 load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false); 6507 } else { 6508 src = fromReg->as_Register(); 6509 } 6510 assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array); 6511 if (is_reference_type(bt)) { 6512 store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED); 6513 } else { 6514 store_sized_value(dst, src, size_in_bytes); 6515 } 6516 } else if (bt == T_DOUBLE) { 6517 movdbl(dst, fromReg->as_XMMRegister()); 6518 } else { 6519 assert(bt == T_FLOAT, "must be float"); 6520 movflt(dst, fromReg->as_XMMRegister()); 6521 } 6522 } 6523 bind(L_null); 6524 sig_index = stream.sig_index(); 6525 from_index = stream.regs_index(); 6526 6527 assert(reg_state[to->value()] == reg_writable, "must have already been read"); 6528 bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state); 6529 assert(success, "to register must be writeable"); 6530 return true; 6531 } 6532 6533 VMReg MacroAssembler::spill_reg_for(VMReg reg) { 6534 return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg(); 6535 } 6536 6537 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) { 6538 assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); 6539 if (needs_stack_repair) { 6540 movq(rbp, Address(rsp, initial_framesize)); 6541 // The stack increment resides just below the saved rbp 6542 addq(rsp, Address(rsp, initial_framesize - wordSize)); 6543 } else { 6544 if (initial_framesize > 0) { 6545 addq(rsp, initial_framesize); 6546 } 6547 pop(rbp); 6548 } 6549 } 6550 6551 // Clearing constant sized memory using YMM/ZMM registers. 
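// Here cnt is a compile-time constant count of qwords: whole 64-byte chunks
// are emitted as straight-line fill64() calls (a small loop is used once
// more than max_unrolled_fill64 chunks are needed), and the 1..7 qword tail
// becomes a single store picked by the switch below. E.g. cnt == 11 emits
// one fill64 plus a 3-qword masked store.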
6552 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { 6553 assert(UseAVX > 2 && VM_Version::supports_avx512vlbw(), ""); 6554 bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0); 6555 6556 int vector64_count = (cnt & (~0x7)) >> 3; 6557 cnt = cnt & 0x7; 6558 const int fill64_per_loop = 4; 6559 const int max_unrolled_fill64 = 8; 6560 6561 // 64 byte initialization loop. 6562 vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit); 6563 int start64 = 0; 6564 if (vector64_count > max_unrolled_fill64) { 6565 Label LOOP; 6566 Register index = rtmp; 6567 6568 start64 = vector64_count - (vector64_count % fill64_per_loop); 6569 6570 movl(index, 0); 6571 BIND(LOOP); 6572 for (int i = 0; i < fill64_per_loop; i++) { 6573 fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector); 6574 } 6575 addl(index, fill64_per_loop * 64); 6576 cmpl(index, start64 * 64); 6577 jccb(Assembler::less, LOOP); 6578 } 6579 for (int i = start64; i < vector64_count; i++) { 6580 fill64(base, i * 64, xtmp, use64byteVector); 6581 } 6582 6583 // Clear remaining 64 byte tail. 6584 int disp = vector64_count * 64; 6585 if (cnt) { 6586 switch (cnt) { 6587 case 1: 6588 movq(Address(base, disp), xtmp); 6589 break; 6590 case 2: 6591 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit); 6592 break; 6593 case 3: 6594 movl(rtmp, 0x7); 6595 kmovwl(mask, rtmp); 6596 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit); 6597 break; 6598 case 4: 6599 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 6600 break; 6601 case 5: 6602 if (use64byteVector) { 6603 movl(rtmp, 0x1F); 6604 kmovwl(mask, rtmp); 6605 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 6606 } else { 6607 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 6608 movq(Address(base, disp + 32), xtmp); 6609 } 6610 break; 6611 case 6: 6612 if (use64byteVector) { 6613 movl(rtmp, 0x3F); 6614 kmovwl(mask, rtmp); 6615 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 6616 } else { 6617 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 6618 evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit); 6619 } 6620 break; 6621 case 7: 6622 if (use64byteVector) { 6623 movl(rtmp, 0x7F); 6624 kmovwl(mask, rtmp); 6625 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 6626 } else { 6627 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 6628 movl(rtmp, 0x7); 6629 kmovwl(mask, rtmp); 6630 evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit); 6631 } 6632 break; 6633 default: 6634 fatal("Unexpected length : %d\n",cnt); 6635 break; 6636 } 6637 } 6638 } 6639 6640 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, 6641 bool is_large, bool word_copy_only, KRegister mask) { 6642 // cnt - number of qwords (8-byte words). 6643 // base - start address, qword aligned. 
6644 // is_large - if optimizers know cnt is larger than InitArrayShortSize 6645 assert(base==rdi, "base register must be edi for rep stos"); 6646 assert(val==rax, "val register must be eax for rep stos"); 6647 assert(cnt==rcx, "cnt register must be ecx for rep stos"); 6648 assert(InitArrayShortSize % BytesPerLong == 0, 6649 "InitArrayShortSize should be the multiple of BytesPerLong"); 6650 6651 Label DONE; 6652 6653 if (!is_large) { 6654 Label LOOP, LONG; 6655 cmpptr(cnt, InitArrayShortSize/BytesPerLong); 6656 jccb(Assembler::greater, LONG); 6657 6658 NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM 6659 6660 decrement(cnt); 6661 jccb(Assembler::negative, DONE); // Zero length 6662 6663 // Use individual pointer-sized stores for small counts: 6664 BIND(LOOP); 6665 movptr(Address(base, cnt, Address::times_ptr), val); 6666 decrement(cnt); 6667 jccb(Assembler::greaterEqual, LOOP); 6668 jmpb(DONE); 6669 6670 BIND(LONG); 6671 } 6672 6673 // Use longer rep-prefixed ops for non-small counts: 6674 if (UseFastStosb && !word_copy_only) { 6675 shlptr(cnt, 3); // convert to number of bytes 6676 rep_stosb(); 6677 } else if (UseXMMForObjInit) { 6678 xmm_clear_mem(base, cnt, val, xtmp, mask); 6679 } else { 6680 NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM 6681 rep_stos(); 6682 } 6683 6684 BIND(DONE); 6685 } 6686 6687 #endif //COMPILER2_OR_JVMCI 6688 6689 6690 void MacroAssembler::generate_fill(BasicType t, bool aligned, 6691 Register to, Register value, Register count, 6692 Register rtmp, XMMRegister xtmp) { 6693 ShortBranchVerifier sbv(this); 6694 assert_different_registers(to, value, count, rtmp); 6695 Label L_exit; 6696 Label L_fill_2_bytes, L_fill_4_bytes; 6697 6698 #if defined(COMPILER2) && defined(_LP64) 6699 if(MaxVectorSize >=32 && 6700 VM_Version::supports_avx512vlbw() && 6701 VM_Version::supports_bmi2()) { 6702 generate_fill_avx3(t, to, value, count, rtmp, xtmp); 6703 return; 6704 } 6705 #endif 6706 6707 int shift = -1; 6708 switch (t) { 6709 case T_BYTE: 6710 shift = 2; 6711 break; 6712 case T_SHORT: 6713 shift = 1; 6714 break; 6715 case T_INT: 6716 shift = 0; 6717 break; 6718 default: ShouldNotReachHere(); 6719 } 6720 6721 if (t == T_BYTE) { 6722 andl(value, 0xff); 6723 movl(rtmp, value); 6724 shll(rtmp, 8); 6725 orl(value, rtmp); 6726 } 6727 if (t == T_SHORT) { 6728 andl(value, 0xffff); 6729 } 6730 if (t == T_BYTE || t == T_SHORT) { 6731 movl(rtmp, value); 6732 shll(rtmp, 16); 6733 orl(value, rtmp); 6734 } 6735 6736 cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element 6737 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp 6738 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) { 6739 Label L_skip_align2; 6740 // align source address at 4 bytes address boundary 6741 if (t == T_BYTE) { 6742 Label L_skip_align1; 6743 // One byte misalignment happens only for byte arrays 6744 testptr(to, 1); 6745 jccb(Assembler::zero, L_skip_align1); 6746 movb(Address(to, 0), value); 6747 increment(to); 6748 decrement(count); 6749 BIND(L_skip_align1); 6750 } 6751 // Two bytes misalignment happens only for byte and short (char) arrays 6752 testptr(to, 2); 6753 jccb(Assembler::zero, L_skip_align2); 6754 movw(Address(to, 0), value); 6755 addptr(to, 2); 6756 subl(count, 1<<(shift-1)); 6757 BIND(L_skip_align2); 6758 } 6759 if (UseSSE < 2) { 6760 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes; 6761 // Fill 32-byte chunks 6762 subl(count, 8 << shift); 6763 
    jcc(Assembler::less, L_check_fill_8_bytes);
    align(16);

    BIND(L_fill_32_bytes_loop);

    for (int i = 0; i < 32; i += 4) {
      movl(Address(to, i), value);
    }

    addptr(to, 32);
    subl(count, 8 << shift);
    jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
    BIND(L_check_fill_8_bytes);
    addl(count, 8 << shift);
    jccb(Assembler::zero, L_exit);
    jmpb(L_fill_8_bytes);

    //
    // length is too short, just fill qwords
    //
    BIND(L_fill_8_bytes_loop);
    movl(Address(to, 0), value);
    movl(Address(to, 4), value);
    addptr(to, 8);
    BIND(L_fill_8_bytes);
    subl(count, 1 << (shift + 1));
    jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
    // fall through to fill 4 bytes
  } else {
    Label L_fill_32_bytes;
    if (!UseUnalignedLoadStores) {
      // align to 8 bytes, we know we are 4 byte aligned to start
      testptr(to, 4);
      jccb(Assembler::zero, L_fill_32_bytes);
      movl(Address(to, 0), value);
      addptr(to, 4);
      subl(count, 1<<shift);
    }
    BIND(L_fill_32_bytes);
    {
      assert(UseSSE >= 2, "supported cpu only");
      Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
      movdl(xtmp, value);
      if (UseAVX >= 2 && UseUnalignedLoadStores) {
        Label L_check_fill_32_bytes;
        if (UseAVX > 2) {
          // Fill 64-byte chunks
          Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;

          // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2
          cmpl(count, VM_Version::avx3_threshold());
          jccb(Assembler::below, L_check_fill_64_bytes_avx2);

          vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);

          subl(count, 16 << shift);
          jccb(Assembler::less, L_check_fill_32_bytes);
          align(16);

          BIND(L_fill_64_bytes_loop_avx3);
          evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
          addptr(to, 64);
          subl(count, 16 << shift);
          jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
          jmpb(L_check_fill_32_bytes);

          BIND(L_check_fill_64_bytes_avx2);
        }
        // Fill 64-byte chunks
        Label L_fill_64_bytes_loop;
        vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);

        subl(count, 16 << shift);
        jcc(Assembler::less, L_check_fill_32_bytes);
        align(16);

        BIND(L_fill_64_bytes_loop);
        vmovdqu(Address(to, 0), xtmp);
        vmovdqu(Address(to, 32), xtmp);
        addptr(to, 64);
        subl(count, 16 << shift);
        jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);

        BIND(L_check_fill_32_bytes);
        addl(count, 8 << shift);
        jccb(Assembler::less, L_check_fill_8_bytes);
        vmovdqu(Address(to, 0), xtmp);
        addptr(to, 32);
        subl(count, 8 << shift);

        BIND(L_check_fill_8_bytes);
        // clean upper bits of YMM registers
        movdl(xtmp, value);
        pshufd(xtmp, xtmp, 0);
      } else {
        // Fill 32-byte chunks
        pshufd(xtmp, xtmp, 0);

        subl(count, 8 << shift);
        jcc(Assembler::less, L_check_fill_8_bytes);
        align(16);

        BIND(L_fill_32_bytes_loop);

        if (UseUnalignedLoadStores) {
          movdqu(Address(to, 0), xtmp);
          movdqu(Address(to, 16), xtmp);
        } else {
          movq(Address(to, 0), xtmp);
          movq(Address(to, 8), xtmp);
          movq(Address(to, 16), xtmp);
          movq(Address(to, 24), xtmp);
        }

        addptr(to, 32);
        subl(count, 8 << shift);
        jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);

        BIND(L_check_fill_8_bytes);
      }
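      // Tail handling shared by the AVX and SSE chunk loops above.
      // A sketch of the control flow from here, in element counts
      // (2 << shift elements == 8 bytes for every supported type):
      //   count += 8 << shift;                  // undo last chunk decrement
      //   if (count == 0) goto L_exit;
      //   while ((count -= 2 << shift) >= 0) {  // one qword per round
      //     store 8 bytes of value; to += 8;
      //   }
      //   // < 8 bytes left: fall through to the 4/2/1-byte tail below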
      addl(count, 8 << shift);
      jccb(Assembler::zero, L_exit);
      jmpb(L_fill_8_bytes);

      //
      // length is too short, just fill qwords
      //
      BIND(L_fill_8_bytes_loop);
      movq(Address(to, 0), xtmp);
      addptr(to, 8);
      BIND(L_fill_8_bytes);
      subl(count, 1 << (shift + 1));
      jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
    }
  }
  // fill trailing 4 bytes
  BIND(L_fill_4_bytes);
  testl(count, 1<<shift);
  jccb(Assembler::zero, L_fill_2_bytes);
  movl(Address(to, 0), value);
  if (t == T_BYTE || t == T_SHORT) {
    Label L_fill_byte;
    addptr(to, 4);
    BIND(L_fill_2_bytes);
    // fill trailing 2 bytes
    testl(count, 1<<(shift-1));
    jccb(Assembler::zero, L_fill_byte);
    movw(Address(to, 0), value);
    if (t == T_BYTE) {
      addptr(to, 2);
      BIND(L_fill_byte);
      // fill trailing byte
      testl(count, 1);
      jccb(Assembler::zero, L_exit);
      movb(Address(to, 0), value);
    } else {
      BIND(L_fill_byte);
    }
  } else {
    BIND(L_fill_2_bytes);
  }
  BIND(L_exit);
}

void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) {
  switch(type) {
    case T_BYTE:
    case T_BOOLEAN:
      evpbroadcastb(dst, src, vector_len);
      break;
    case T_SHORT:
    case T_CHAR:
      evpbroadcastw(dst, src, vector_len);
      break;
    case T_INT:
    case T_FLOAT:
      evpbroadcastd(dst, src, vector_len);
      break;
    case T_LONG:
    case T_DOUBLE:
      evpbroadcastq(dst, src, vector_len);
      break;
    default:
      fatal("Unhandled type: %s", type2name(type));
      break;
  }
}

// encode char[] to byte[] in ISO_8859_1 or ASCII
//@IntrinsicCandidate
//private static int implEncodeISOArray(byte[] sa, int sp,
//                                      byte[] da, int dp, int len) {
//  int i = 0;
//  for (; i < len; i++) {
//    char c = StringUTF16.getChar(sa, sp++);
//    if (c > '\u00FF')
//      break;
//    da[dp++] = (byte)c;
//  }
//  return i;
//}
//
//@IntrinsicCandidate
//private static int implEncodeAsciiArray(char[] sa, int sp,
//                                        byte[] da, int dp, int len) {
//  int i = 0;
//  for (; i < len; i++) {
//    char c = sa[sp++];
//    if (c >= '\u0080')
//      break;
//    da[dp++] = (byte)c;
//  }
//  return i;
//}
void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
                                      XMMRegister tmp1Reg, XMMRegister tmp2Reg,
                                      XMMRegister tmp3Reg, XMMRegister tmp4Reg,
                                      Register tmp5, Register result, bool ascii) {

  // rsi: src
  // rdi: dst
  // rdx: len
  // rcx: tmp5
  // rax: result
  ShortBranchVerifier sbv(this);
  assert_different_registers(src, dst, len, tmp5, result);
  Label L_done, L_copy_1_char, L_copy_1_char_exit;

  int mask = ascii ? 0xff80ff80 : 0xff00ff00;
  int short_mask = ascii ? 0xff80 : 0xff00;
  // set result
  xorl(result, result);
  // check for zero length
  testl(len, len);
  jcc(Assembler::zero, L_done);

  movl(result, len);

  // Setup pointers
  lea(src, Address(src, len, Address::times_2)); // char[]
  lea(dst, Address(dst, len, Address::times_1)); // byte[]
  negptr(len);

  if (UseSSE42Intrinsics || UseAVX >= 2) {
    Label L_copy_8_chars, L_copy_8_chars_exit;
    Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;

    if (UseAVX >= 2) {
      Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
      movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector
      movdl(tmp1Reg, tmp5);
      vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit);
      jmp(L_chars_32_check);

      bind(L_copy_32_chars);
      vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
      vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
      vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
      vptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector
      jccb(Assembler::notZero, L_copy_32_chars_exit);
      vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
      vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
      vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);

      bind(L_chars_32_check);
      addptr(len, 32);
      jcc(Assembler::lessEqual, L_copy_32_chars);

      bind(L_copy_32_chars_exit);
      subptr(len, 16);
      jccb(Assembler::greater, L_copy_16_chars_exit);

    } else if (UseSSE42Intrinsics) {
      movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector
      movdl(tmp1Reg, tmp5);
      pshufd(tmp1Reg, tmp1Reg, 0);
      jmpb(L_chars_16_check);
    }

    bind(L_copy_16_chars);
    if (UseAVX >= 2) {
      vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
      vptest(tmp2Reg, tmp1Reg);
      jcc(Assembler::notZero, L_copy_16_chars_exit);
      vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
      vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
    } else {
      if (UseAVX > 0) {
        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
        vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
      } else {
        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
        por(tmp2Reg, tmp3Reg);
        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
        por(tmp2Reg, tmp4Reg);
      }
      ptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector
      jccb(Assembler::notZero, L_copy_16_chars_exit);
      packuswb(tmp3Reg, tmp4Reg);
    }
    movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);

    bind(L_chars_16_check);
    addptr(len, 16);
    jcc(Assembler::lessEqual, L_copy_16_chars);

    bind(L_copy_16_chars_exit);
    if (UseAVX >= 2) {
      // clean upper bits of YMM registers
      vpxor(tmp2Reg, tmp2Reg);
      vpxor(tmp3Reg, tmp3Reg);
      vpxor(tmp4Reg, tmp4Reg);
      movdl(tmp1Reg, tmp5);
      pshufd(tmp1Reg, tmp1Reg, 0);
    }
    subptr(len, 8);
    jccb(Assembler::greater, L_copy_8_chars_exit);

    bind(L_copy_8_chars);
    movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
    ptest(tmp3Reg, tmp1Reg);
    jccb(Assembler::notZero, L_copy_8_chars_exit);
    packuswb(tmp3Reg, tmp1Reg);
    movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
    addptr(len, 8);
    jccb(Assembler::lessEqual, L_copy_8_chars);

    bind(L_copy_8_chars_exit);
    subptr(len, 8);
    jccb(Assembler::zero, L_done);
  }

  bind(L_copy_1_char);
  load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
  testl(tmp5, short_mask); // check if Unicode or non-ASCII char
  jccb(Assembler::notZero, L_copy_1_char_exit);
  movb(Address(dst, len, Address::times_1, 0), tmp5);
  addptr(len, 1);
  jccb(Assembler::less, L_copy_1_char);

  bind(L_copy_1_char_exit);
  addptr(result, len); // len is negative count of not processed elements

  bind(L_done);
}

#ifdef _LP64
/**
 * Helper for multiply_to_len().
 */
void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
  addq(dest_lo, src1);
  adcq(dest_hi, 0);
  addq(dest_lo, src2);
  adcq(dest_hi, 0);
}

/**
 * Multiply 64 bit by 64 bit first loop.
 */
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                                           Register y, Register y_idx, Register z,
                                           Register carry, Register product,
                                           Register idx, Register kdx) {
  //
  //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    huge_128 product = y[idx] * x[xstart] + carry;
  //    z[kdx] = (jlong)product;
  //    carry  = (jlong)(product >>> 64);
  //  }
  //  z[xstart] = carry;
  //

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  decrementl(xstart);
  jcc(Assembler::negative, L_one_x);

  movq(x_xstart, Address(x, xstart, Address::times_4, 0));
  rorq(x_xstart, 32); // convert big-endian to little-endian

  bind(L_first_loop);
  decrementl(idx);
  jcc(Assembler::negative, L_first_loop_exit);
  decrementl(idx);
  jcc(Assembler::negative, L_one_y);
  movq(y_idx, Address(y, idx, Address::times_4, 0));
  rorq(y_idx, 32); // convert big-endian to little-endian
  bind(L_multiply);
  movq(product, x_xstart);
  mulq(y_idx); // product(rax) * y_idx -> rdx:rax
  addq(product, carry);
  adcq(rdx, 0);
  subl(kdx, 2);
  movl(Address(z, kdx, Address::times_4, 4), product);
  shrq(product, 32);
  movl(Address(z, kdx, Address::times_4, 0), product);
  movq(carry, rdx);
  jmp(L_first_loop);

  bind(L_one_y);
  movl(y_idx, Address(y, 0));
  jmp(L_multiply);

  bind(L_one_x);
  movl(x_xstart, Address(x, 0));
  jmp(L_first_loop);

  bind(L_first_loop_exit);
}

/**
 * Multiply 64 bit by 64 bit and add 128 bit.
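 * In effect (a sketch of one step of the third loop): the 128-bit product
 * rdx:rax = x_xstart * y[idx] is computed, then z[idx] and the incoming
 * carry are added on top via add2_with_carry; the low 64 bits go back to
 * z[idx] (as two big-endian 32-bit words) and the high 64 bits are left
 * in rdx for the caller to use as the next carry.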
 */
void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                                            Register yz_idx, Register idx,
                                            Register carry, Register product, int offset) {
  //     huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
  //     z[kdx] = (jlong)product;

  movq(yz_idx, Address(y, idx, Address::times_4, offset));
  rorq(yz_idx, 32); // convert big-endian to little-endian
  movq(product, x_xstart);
  mulq(yz_idx);     // product(rax) * yz_idx -> rdx:product(rax)
  movq(yz_idx, Address(z, idx, Address::times_4, offset));
  rorq(yz_idx, 32); // convert big-endian to little-endian

  add2_with_carry(rdx, product, carry, yz_idx);

  movl(Address(z, idx, Address::times_4, offset+4), product);
  shrq(product, 32);
  movl(Address(z, idx, Address::times_4, offset), product);
}

/**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
 */
void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                                             Register yz_idx, Register idx, Register jdx,
                                             Register carry, Register product,
                                             Register carry2) {
  //   jlong carry, x[], y[], z[];
  //   int kdx = ystart+1;
  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //     huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
  //     z[kdx+idx+1] = (jlong)product;
  //     jlong carry2  = (jlong)(product >>> 64);
  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
  //     z[kdx+idx] = (jlong)product;
  //     carry  = (jlong)(product >>> 64);
  //   }
  //   idx += 2;
  //   if (idx > 0) {
  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
  //     z[kdx+idx] = (jlong)product;
  //     carry  = (jlong)(product >>> 64);
  //   }
  //

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  movl(jdx, idx);
  andl(jdx, 0xFFFFFFFC);
  shrl(jdx, 2);

  bind(L_third_loop);
  subl(jdx, 1);
  jcc(Assembler::negative, L_third_loop_exit);
  subl(idx, 4);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
  movq(carry2, rdx);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
  movq(carry, rdx);
  jmp(L_third_loop);

  bind(L_third_loop_exit);

  andl(idx, 0x3);
  jcc(Assembler::zero, L_post_third_loop_done);

  Label L_check_1;
  subl(idx, 2);
  jcc(Assembler::negative, L_check_1);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
  movq(carry, rdx);

  bind(L_check_1);
  addl(idx, 0x2);
  andl(idx, 0x1);
  subl(idx, 1);
  jcc(Assembler::negative, L_post_third_loop_done);

  movl(yz_idx, Address(y, idx, Address::times_4, 0));
  movq(product, x_xstart);
  mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
  movl(yz_idx, Address(z, idx, Address::times_4, 0));

  add2_with_carry(rdx, product, yz_idx, carry);

  movl(Address(z, idx, Address::times_4, 0), product);
  shrq(product, 32);

  shlq(rdx, 32);
  orq(product, rdx);
  movq(carry, product);

  bind(L_post_third_loop_done);
}

/**
 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
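 * Relies on mulx reading its second source operand implicitly from rdx
 * (loaded with x[xstart] by the caller) and leaving the flags untouched;
 * on CPUs with ADX this is what allows the adcx/adox pair below to keep
 * two independent carry chains (CF and OF) alive across the unrolled body.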
 *
 */
void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
                                                  Register carry, Register carry2,
                                                  Register idx, Register jdx,
                                                  Register yz_idx1, Register yz_idx2,
                                                  Register tmp, Register tmp3, Register tmp4) {
  assert(UseBMI2Instructions, "should be used only when BMI2 is available");

  //   jlong carry, x[], y[], z[];
  //   int kdx = ystart+1;
  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //     huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
  //     jlong carry2  = (jlong)(tmp3 >>> 64);
  //     huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2;
  //     carry  = (jlong)(tmp4 >>> 64);
  //     z[kdx+idx+1] = (jlong)tmp3;
  //     z[kdx+idx] = (jlong)tmp4;
  //   }
  //   idx += 2;
  //   if (idx > 0) {
  //     yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
  //     z[kdx+idx] = (jlong)yz_idx1;
  //     carry  = (jlong)(yz_idx1 >>> 64);
  //   }
  //

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  movl(jdx, idx);
  andl(jdx, 0xFFFFFFFC);
  shrl(jdx, 2);

  bind(L_third_loop);
  subl(jdx, 1);
  jcc(Assembler::negative, L_third_loop_exit);
  subl(idx, 4);

  movq(yz_idx1, Address(y, idx, Address::times_4, 8));
  rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
  movq(yz_idx2, Address(y, idx, Address::times_4, 0));
  rorxq(yz_idx2, yz_idx2, 32);

  mulxq(tmp4, tmp3, yz_idx1);  // yz_idx1 * rdx -> tmp4:tmp3
  mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp

  movq(yz_idx1, Address(z, idx, Address::times_4, 8));
  rorxq(yz_idx1, yz_idx1, 32);
  movq(yz_idx2, Address(z, idx, Address::times_4, 0));
  rorxq(yz_idx2, yz_idx2, 32);

  if (VM_Version::supports_adx()) {
    adcxq(tmp3, carry);
    adoxq(tmp3, yz_idx1);

    adcxq(tmp4, tmp);
    adoxq(tmp4, yz_idx2);

    movl(carry, 0); // does not affect flags
    adcxq(carry2, carry);
    adoxq(carry2, carry);
  } else {
    add2_with_carry(tmp4, tmp3, carry, yz_idx1);
    add2_with_carry(carry2, tmp4, tmp, yz_idx2);
  }
  movq(carry, carry2);

  movl(Address(z, idx, Address::times_4, 12), tmp3);
  shrq(tmp3, 32);
  movl(Address(z, idx, Address::times_4,  8), tmp3);

  movl(Address(z, idx, Address::times_4,  4), tmp4);
  shrq(tmp4, 32);
  movl(Address(z, idx, Address::times_4,  0), tmp4);

  jmp(L_third_loop);

  bind(L_third_loop_exit);

  andl(idx, 0x3);
  jcc(Assembler::zero, L_post_third_loop_done);

  Label L_check_1;
  subl(idx, 2);
  jcc(Assembler::negative, L_check_1);

  movq(yz_idx1, Address(y, idx, Address::times_4, 0));
  rorxq(yz_idx1, yz_idx1, 32);
  mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3
  movq(yz_idx2, Address(z, idx, Address::times_4, 0));
  rorxq(yz_idx2, yz_idx2, 32);

  add2_with_carry(tmp4, tmp3, carry, yz_idx2);

  movl(Address(z, idx, Address::times_4, 4), tmp3);
  shrq(tmp3, 32);
  movl(Address(z, idx, Address::times_4, 0), tmp3);
  movq(carry, tmp4);

  bind(L_check_1);
  addl(idx, 0x2);
  andl(idx, 0x1);
  subl(idx, 1);
  jcc(Assembler::negative, L_post_third_loop_done);
  movl(tmp4, Address(y, idx, Address::times_4, 0));
  mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3
  movl(tmp4, Address(z, idx, Address::times_4, 0));

  add2_with_carry(carry2, tmp3, tmp4, carry);

  movl(Address(z, idx, Address::times_4, 0), tmp3);
  shrq(tmp3, 32);

  shlq(carry2, 32);
  orq(tmp3, carry2);
  movq(carry, tmp3);

  bind(L_post_third_loop_done);
}

/**
 * Code for BigInteger::multiplyToLen() intrinsic.
 *
 * rdi: x
 * rax: xlen
 * rsi: y
 * rcx: ylen
 * r8:  z
 * r11: zlen
 * r12: tmp1
 * r13: tmp2
 * r14: tmp3
 * r15: tmp4
 * rbx: tmp5
 *
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
  ShortBranchVerifier sbv(this);
  assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);

  push(tmp1);
  push(tmp2);
  push(tmp3);
  push(tmp4);
  push(tmp5);

  push(xlen);
  push(zlen);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product  = xlen;
  const Register x_xstart = zlen; // reuse register

  // First Loop.
  //
  //  final static long LONG_MASK = 0xffffffffL;
  //  int xstart = xlen - 1;
  //  int ystart = ylen - 1;
  //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //    z[kdx] = (int)product;
  //    carry = product >>> 32;
  //  }
  //  z[xstart] = (int)carry;
  //

  movl(idx, ylen);    // idx = ylen;
  movl(kdx, zlen);    // kdx = xlen+ylen;
  xorq(carry, carry); // carry = 0;

  Label L_done;

  movl(xstart, xlen);
  decrementl(xstart);
  jcc(Assembler::negative, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  Label L_second_loop;
  testl(kdx, kdx);
  jcc(Assembler::zero, L_second_loop);

  Label L_carry;
  subl(kdx, 1);
  jcc(Assembler::zero, L_carry);

  movl(Address(z, kdx, Address::times_4, 0), carry);
  shrq(carry, 32);
  subl(kdx, 1);

  bind(L_carry);
  movl(Address(z, kdx, Address::times_4, 0), carry);

  // Second and third (nested) loops.
  //
  // for (int i = xstart-1; i >= 0; i--) { // Second loop
  //   carry = 0;
  //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
  //                    (z[k] & LONG_MASK) + carry;
  //     z[k] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[i] = (int)carry;
  // }
  //
  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx

  const Register jdx = tmp1;

  bind(L_second_loop);
  xorl(carry, carry); // carry = 0;
  movl(jdx, ylen);    // j = ystart+1

  subl(xstart, 1);    // i = xstart-1;
  jcc(Assembler::negative, L_done);

  push(z);

  Label L_last_x;
  lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
  subl(xstart, 1);    // i = xstart-1;
  jcc(Assembler::negative, L_last_x);

  if (UseBMI2Instructions) {
    movq(rdx, Address(x, xstart, Address::times_4, 0));
    rorxq(rdx, rdx, 32); // convert big-endian to little-endian
  } else {
    movq(x_xstart, Address(x, xstart, Address::times_4, 0));
    rorq(x_xstart, 32);  // convert big-endian to little-endian
  }

  Label L_third_loop_prologue;
  bind(L_third_loop_prologue);

  push(x);
  push(xstart);
  push(ylen);

  if (UseBMI2Instructions) {
    multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
  } else { // !UseBMI2Instructions
    multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
  }

  pop(ylen);
  pop(xlen);
  pop(x);
  pop(z);

  movl(tmp3, xlen);
  addl(tmp3, 1);
  movl(Address(z, tmp3, Address::times_4, 0), carry);
  subl(tmp3, 1);
  jccb(Assembler::negative, L_done);

  shrq(carry, 32);
  movl(Address(z, tmp3, Address::times_4, 0), carry);
  jmp(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);
  if (UseBMI2Instructions) {
    movl(rdx, Address(x, 0));
  } else {
    movl(x_xstart, Address(x, 0));
  }
  jmp(L_third_loop_prologue);

  bind(L_done);

  pop(zlen);
  pop(xlen);

  pop(tmp5);
  pop(tmp4);
  pop(tmp3);
  pop(tmp2);
  pop(tmp1);
}

void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                                         Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2) {
  assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
  Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
  Label VECTOR8_TAIL, VECTOR4_TAIL;
  Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
  Label SAME_TILL_END, DONE;
  Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL;

  // scale is in rcx on both Win64 and Unix
  ShortBranchVerifier sbv(this);

  shlq(length);
  xorq(result, result);

  if ((AVX3Threshold == 0) && (UseAVX > 2) &&
      VM_Version::supports_avx512vlbw()) {
    Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;

    cmpq(length, 64);
    jcc(Assembler::less, VECTOR32_TAIL);

    movq(tmp1, length);
    andq(tmp1, 0x3F);      // tail count
    andq(length, ~(0x3F)); // vector count

    bind(VECTOR64_LOOP);
    // AVX512 code to compare 64 byte vectors.
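    // The mask register k7 gets one bit per byte lane, set where the two
    // 64-byte chunks are equal; kortest sets CF only when the mask is all
    // ones, so carryClear (aboveEqual) means at least one lane differed.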
    evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit);
    evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
    kortestql(k7, k7);
    jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch
    addq(result, 64);
    subq(length, 64);
    jccb(Assembler::notZero, VECTOR64_LOOP);

    testq(tmp1, tmp1);
    jcc(Assembler::zero, SAME_TILL_END);

    //bind(VECTOR64_TAIL);
    // AVX512 code to compare up to 63 byte vectors.
    mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
    shlxq(tmp2, tmp2, tmp1);
    notq(tmp2);
    kmovql(k3, tmp2);

    evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit);
    evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit);

    ktestql(k7, k3);
    jcc(Assembler::below, SAME_TILL_END); // not mismatch

    bind(VECTOR64_NOT_EQUAL);
    kmovql(tmp1, k7);
    notq(tmp1);
    tzcntq(tmp1, tmp1);
    addq(result, tmp1);
    shrq(result);
    jmp(DONE);
    bind(VECTOR32_TAIL);
  }

  cmpq(length, 8);
  jcc(Assembler::equal, VECTOR8_LOOP);
  jcc(Assembler::less, VECTOR4_TAIL);

  if (UseAVX >= 2) {
    Label VECTOR16_TAIL, VECTOR32_LOOP;

    cmpq(length, 16);
    jcc(Assembler::equal, VECTOR16_LOOP);
    jcc(Assembler::less, VECTOR8_LOOP);

    cmpq(length, 32);
    jccb(Assembler::less, VECTOR16_TAIL);

    subq(length, 32);
    bind(VECTOR32_LOOP);
    vmovdqu(rymm0, Address(obja, result));
    vmovdqu(rymm1, Address(objb, result));
    vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit);
    vptest(rymm2, rymm2);
    jcc(Assembler::notZero, VECTOR32_NOT_EQUAL); // mismatch found
    addq(result, 32);
    subq(length, 32);
    jcc(Assembler::greaterEqual, VECTOR32_LOOP);
    addq(length, 32);
    jcc(Assembler::equal, SAME_TILL_END);
    // falling through if less than 32 bytes left; close the branch here.
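    // From here on the remaining length is resolved in strictly narrowing
    // steps: 16-byte, 8-byte and 4-byte compares, then a byte-wise tail.
    // Each tail label below assumes the previous stage already ruled out
    // its own chunk size.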
    bind(VECTOR16_TAIL);
    cmpq(length, 16);
    jccb(Assembler::less, VECTOR8_TAIL);
    bind(VECTOR16_LOOP);
    movdqu(rymm0, Address(obja, result));
    movdqu(rymm1, Address(objb, result));
    vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit);
    ptest(rymm2, rymm2);
    jcc(Assembler::notZero, VECTOR16_NOT_EQUAL); // mismatch found
    addq(result, 16);
    subq(length, 16);
    jcc(Assembler::equal, SAME_TILL_END);
    // falling through if less than 16 bytes left
  } else { // regular intrinsics

    cmpq(length, 16);
    jccb(Assembler::less, VECTOR8_TAIL);

    subq(length, 16);
    bind(VECTOR16_LOOP);
    movdqu(rymm0, Address(obja, result));
    movdqu(rymm1, Address(objb, result));
    pxor(rymm0, rymm1);
    ptest(rymm0, rymm0);
    jcc(Assembler::notZero, VECTOR16_NOT_EQUAL); // mismatch found
    addq(result, 16);
    subq(length, 16);
    jccb(Assembler::greaterEqual, VECTOR16_LOOP);
    addq(length, 16);
    jcc(Assembler::equal, SAME_TILL_END);
    // falling through if less than 16 bytes left
  }

  bind(VECTOR8_TAIL);
  cmpq(length, 8);
  jccb(Assembler::less, VECTOR4_TAIL);
  bind(VECTOR8_LOOP);
  movq(tmp1, Address(obja, result));
  movq(tmp2, Address(objb, result));
  xorq(tmp1, tmp2);
  testq(tmp1, tmp1);
  jcc(Assembler::notZero, VECTOR8_NOT_EQUAL); // mismatch found
  addq(result, 8);
  subq(length, 8);
  jcc(Assembler::equal, SAME_TILL_END);
  // falling through if less than 8 bytes left

  bind(VECTOR4_TAIL);
  cmpq(length, 4);
  jccb(Assembler::less, BYTES_TAIL);
  bind(VECTOR4_LOOP);
  movl(tmp1, Address(obja, result));
  xorl(tmp1, Address(objb, result));
  testl(tmp1, tmp1);
  jcc(Assembler::notZero, VECTOR4_NOT_EQUAL); // mismatch found
  addq(result, 4);
  subq(length, 4);
  jcc(Assembler::equal, SAME_TILL_END);
  // falling through if less than 4 bytes left

  bind(BYTES_TAIL);
  bind(BYTES_LOOP);
  load_unsigned_byte(tmp1, Address(obja, result));
  load_unsigned_byte(tmp2, Address(objb, result));
  xorl(tmp1, tmp2);
  testl(tmp1, tmp1);
  jcc(Assembler::notZero, BYTES_NOT_EQUAL); // mismatch found
  decq(length);
  jcc(Assembler::zero, SAME_TILL_END);
  incq(result);
  load_unsigned_byte(tmp1, Address(obja, result));
  load_unsigned_byte(tmp2, Address(objb, result));
  xorl(tmp1, tmp2);
  testl(tmp1, tmp1);
  jcc(Assembler::notZero, BYTES_NOT_EQUAL); // mismatch found
  decq(length);
  jcc(Assembler::zero, SAME_TILL_END);
  incq(result);
  load_unsigned_byte(tmp1, Address(obja, result));
  load_unsigned_byte(tmp2, Address(objb, result));
  xorl(tmp1, tmp2);
  testl(tmp1, tmp1);
  jcc(Assembler::notZero, BYTES_NOT_EQUAL); // mismatch found
  jmp(SAME_TILL_END);

  if (UseAVX >= 2) {
    bind(VECTOR32_NOT_EQUAL);
    vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit);
    vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit);
    vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit);
    vpmovmskb(tmp1, rymm0);
    bsfq(tmp1, tmp1);
    addq(result, tmp1);
    shrq(result);
    jmp(DONE);
  }

  bind(VECTOR16_NOT_EQUAL);
  if (UseAVX >= 2) {
    vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit);
    vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit);
    pxor(rymm0, rymm2);
  } else {
    pcmpeqb(rymm2, rymm2);
    pxor(rymm0, rymm1);
    pcmpeqb(rymm0, rymm1);
    pxor(rymm0, rymm2);
  }
  pmovmskb(tmp1, rymm0);
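  // tmp1 now holds one bit per byte, set where the 16-byte chunks differ;
  // bsf finds the first such byte, and shrq (by the element-size shift
  // still in rcx) converts the byte offset into an element index.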
  bsfq(tmp1, tmp1);
  addq(result, tmp1);
  shrq(result);
  jmpb(DONE);

  bind(VECTOR8_NOT_EQUAL);
  bind(VECTOR4_NOT_EQUAL);
  bsfq(tmp1, tmp1);
  shrq(tmp1, 3);
  addq(result, tmp1);
  bind(BYTES_NOT_EQUAL);
  shrq(result);
  jmpb(DONE);

  bind(SAME_TILL_END);
  mov64(result, -1);

  bind(DONE);
}

// Helper functions for square_to_len()

/**
 * Store the squares of x[], right shifted one bit (divided by 2) into z[]
 * Preserves x and z and modifies rest of the registers.
 */
void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
  // Perform square and right shift by 1
  // Handle odd xlen case first, then for even xlen do the following
  // jlong carry = 0;
  // for (int j=0, i=0; j < xlen; j+=2, i+=4) {
  //   huge_128 product = x[j:j+1] * x[j:j+1];
  //   z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
  //   z[i+2:i+3] = (jlong)(product >>> 1);
  //   carry = (jlong)product;
  // }

  xorq(tmp5, tmp5);   // carry
  xorq(rdxReg, rdxReg);
  xorl(tmp1, tmp1);   // index for x
  xorl(tmp4, tmp4);   // index for z

  Label L_first_loop, L_first_loop_exit;

  testl(xlen, 1);
  jccb(Assembler::zero, L_first_loop); // jump if xlen is even

  // Square and right shift by 1 the odd element using 32 bit multiply
  movl(raxReg, Address(x, tmp1, Address::times_4, 0));
  imulq(raxReg, raxReg);
  shrq(raxReg, 1);
  adcq(tmp5, 0);
  movq(Address(z, tmp4, Address::times_4, 0), raxReg);
  incrementl(tmp1);
  addl(tmp4, 2);

  // Square and right shift by 1 the rest using 64 bit multiply
  bind(L_first_loop);
  cmpptr(tmp1, xlen);
  jccb(Assembler::equal, L_first_loop_exit);

  // Square
  movq(raxReg, Address(x, tmp1, Address::times_4, 0));
  rorq(raxReg, 32); // convert big-endian to little-endian
  mulq(raxReg);     // 64-bit multiply rax * rax -> rdx:rax

  // Right shift by 1 and save carry
  shrq(tmp5, 1);    // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
  rcrq(rdxReg, 1);
  rcrq(raxReg, 1);
  adcq(tmp5, 0);

  // Store result in z
  movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
  movq(Address(z, tmp4, Address::times_4, 8), raxReg);

  // Update indices for x and z
  addl(tmp1, 2);
  addl(tmp4, 4);
  jmp(L_first_loop);

  bind(L_first_loop_exit);
}


/**
 * Perform the following multiply add operation using BMI2 instructions
 * carry:sum = sum + op1*op2 + carry
 * op2 should be in rdx
 * op2 is preserved, all other registers are modified
 */
void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
  // assert op2 is rdx
  mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1
  addq(sum, carry);
  adcq(tmp2, 0);
  addq(sum, op1);
  adcq(tmp2, 0);
  movq(carry, tmp2);
}

/**
 * Perform the following multiply add operation:
 * carry:sum = sum + op1*op2 + carry
 * Preserves op1, op2 and modifies rest of registers
 */
void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
  // rdx:rax = op1 * op2
  movq(raxReg, op2);
  mulq(op1);

  // rdx:rax = sum + carry + rdx:rax
  addq(sum, carry);
  adcq(rdxReg, 0);
  addq(sum, raxReg);
  adcq(rdxReg, 0);

  // carry:sum = rdx:sum
  movq(carry, rdxReg);
}

/**
 * Add 64 bit long carry into z[] with carry propagation.
 * Preserves z and carry register values and modifies rest of registers.
 *
 */
void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
  Label L_fourth_loop, L_fourth_loop_exit;

  movl(tmp1, 1);
  subl(zlen, 2);
  addq(Address(z, zlen, Address::times_4, 0), carry);

  bind(L_fourth_loop);
  jccb(Assembler::carryClear, L_fourth_loop_exit);
  subl(zlen, 2);
  jccb(Assembler::negative, L_fourth_loop_exit);
  addq(Address(z, zlen, Address::times_4, 0), tmp1);
  jmp(L_fourth_loop);
  bind(L_fourth_loop_exit);
}

/**
 * Shift z[] left by 1 bit.
 * Preserves x, len, z and zlen registers and modifies rest of the registers.
 *
 */
void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {

  Label L_fifth_loop, L_fifth_loop_exit;

  // Fifth loop
  // Perform primitiveLeftShift(z, zlen, 1)

  const Register prev_carry = tmp1;
  const Register new_carry = tmp4;
  const Register value = tmp2;
  const Register zidx = tmp3;

  // int zidx, carry;
  // long value;
  // carry = 0;
  // for (zidx = zlen-2; zidx >= 0; zidx -= 2) {
  //   (carry:value) = (z[zidx] << 1) | carry;
  //   z[zidx] = value;
  // }

  movl(zidx, zlen);
  xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register

  bind(L_fifth_loop);
  decl(zidx); // Use decl to preserve carry flag
  decl(zidx);
  jccb(Assembler::negative, L_fifth_loop_exit);

  if (UseBMI2Instructions) {
    movq(value, Address(z, zidx, Address::times_4, 0));
    rclq(value, 1);
    rorxq(value, value, 32);
    movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form
  }
  else {
    // clear new_carry
    xorl(new_carry, new_carry);

    // Shift z[zidx] by 1, or in previous carry and save new carry
    movq(value, Address(z, zidx, Address::times_4, 0));
    shlq(value, 1);
    adcl(new_carry, 0);

    orq(value, prev_carry);
    rorq(value, 0x20);
    movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form

    // Set previous carry = new carry
    movl(prev_carry, new_carry);
  }
  jmp(L_fifth_loop);

  bind(L_fifth_loop_exit);
}


/**
 * Code for BigInteger::squareToLen() intrinsic
 *
 * rdi: x
 * rsi: len
 * r8:  z
 * rcx: zlen
 * r12: tmp1
 * r13: tmp2
 * r14: tmp3
 * r15: tmp4
 * rbx: tmp5
 *
 */
void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {

  Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply;
  push(tmp1);
  push(tmp2);
  push(tmp3);
  push(tmp4);
  push(tmp5);

  // First loop
  // Store the squares, right shifted one bit (i.e., divided by 2).
  square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);

  // Add in off-diagonal sums.
  //
  // Second, third (nested) and fourth loops.
  // zlen +=2;
  // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
  //   carry = 0;
  //   long op2 = x[xidx:xidx+1];
  //   for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
  //     k -= 2;
  //     long op1 = x[j:j+1];
  //     long sum = z[k:k+1];
  //     carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
  //     z[k:k+1] = sum;
  //   }
  //   add_one_64(z, k, carry, tmp_regs);
  // }

  const Register carry = tmp5;
  const Register sum = tmp3;
  const Register op1 = tmp4;
  Register op2 = tmp2;

  push(zlen);
  push(len);
  addl(zlen,2);
  bind(L_second_loop);
  xorq(carry, carry);
  subl(zlen, 4);
  subl(len, 2);
  push(zlen);
  push(len);
  cmpl(len, 0);
  jccb(Assembler::lessEqual, L_second_loop_exit);

  // Multiply an array by one 64 bit long.
  if (UseBMI2Instructions) {
    op2 = rdxReg;
    movq(op2, Address(x, len, Address::times_4, 0));
    rorxq(op2, op2, 32);
  }
  else {
    movq(op2, Address(x, len, Address::times_4, 0));
    rorq(op2, 32);
  }

  bind(L_third_loop);
  decrementl(len);
  jccb(Assembler::negative, L_third_loop_exit);
  decrementl(len);
  jccb(Assembler::negative, L_last_x);

  movq(op1, Address(x, len, Address::times_4, 0));
  rorq(op1, 32);

  bind(L_multiply);
  subl(zlen, 2);
  movq(sum, Address(z, zlen, Address::times_4, 0));

  // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry.
  if (UseBMI2Instructions) {
    multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
  }
  else {
    multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
  }

  movq(Address(z, zlen, Address::times_4, 0), sum);

  jmp(L_third_loop);
  bind(L_third_loop_exit);

  // Fourth loop
  // Add 64 bit long carry into z with carry propagation.
  // Uses offsetted zlen.
  add_one_64(z, zlen, carry, tmp1);

  pop(len);
  pop(zlen);
  jmp(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);
  movl(op1, Address(x, 0));
  jmp(L_multiply);

  bind(L_second_loop_exit);
  pop(len);
  pop(zlen);
  pop(len);
  pop(zlen);

  // Fifth loop
  // Shift z left 1 bit.
  lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);

  // z[zlen-1] |= x[len-1] & 1;
  movl(tmp3, Address(x, len, Address::times_4, -4));
  andl(tmp3, 1);
  orl(Address(z, zlen, Address::times_4, -4), tmp3);

  pop(tmp5);
  pop(tmp4);
  pop(tmp3);
  pop(tmp2);
  pop(tmp1);
}

/**
 * Helper function for mul_add()
 * Multiply the in[] by int k and add to out[] starting at offset offs using
 * 128 bit by 32 bit multiply and return the carry in tmp5.
 * Only quad int aligned length of in[] is operated on in this function.
 * k is in rdxReg for BMI2Instructions, for others it is in tmp2.
 * This function preserves out, in and k registers.
 * len and offset point to the appropriate index in "in" and "out" respectively.
 * tmp5 has the carry.
 * other registers are temporary and are modified.
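 * Per 64-bit limb the effect is roughly (a sketch):
 *   carry:sum = out_limb + in_limb * k + carry;  out_limb = sum;
 * Each limb is stored big-endian, so it is rotated to little-endian
 * around the multiply-add and rotated back before the store.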
 *
 */
void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in,
  Register offset, Register len, Register tmp1, Register tmp2, Register tmp3,
  Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {

  Label L_first_loop, L_first_loop_exit;

  movl(tmp1, len);
  shrl(tmp1, 2);

  bind(L_first_loop);
  subl(tmp1, 1);
  jccb(Assembler::negative, L_first_loop_exit);

  subl(len, 4);
  subl(offset, 4);

  Register op2 = tmp2;
  const Register sum = tmp3;
  const Register op1 = tmp4;
  const Register carry = tmp5;

  if (UseBMI2Instructions) {
    op2 = rdxReg;
  }

  movq(op1, Address(in, len, Address::times_4, 8));
  rorq(op1, 32);
  movq(sum, Address(out, offset, Address::times_4, 8));
  rorq(sum, 32);
  if (UseBMI2Instructions) {
    multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
  }
  else {
    multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
  }
  // Store back in big endian from little endian
  rorq(sum, 0x20);
  movq(Address(out, offset, Address::times_4, 8), sum);

  movq(op1, Address(in, len, Address::times_4, 0));
  rorq(op1, 32);
  movq(sum, Address(out, offset, Address::times_4, 0));
  rorq(sum, 32);
  if (UseBMI2Instructions) {
    multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
  }
  else {
    multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
  }
  // Store back in big endian from little endian
  rorq(sum, 0x20);
  movq(Address(out, offset, Address::times_4, 0), sum);

  jmp(L_first_loop);
  bind(L_first_loop_exit);
}

/**
 * Code for BigInteger::mulAdd() intrinsic
 *
 * rdi: out
 * rsi: in
 * r11: offs (out.length - offset)
 * rcx: len
 * r8:  k
 * r12: tmp1
 * r13: tmp2
 * r14: tmp3
 * r15: tmp4
 * rbx: tmp5
 * Multiply the in[] by word k and add to out[], return the carry in rax
 */
void MacroAssembler::mul_add(Register out, Register in, Register offs,
  Register len, Register k, Register tmp1, Register tmp2, Register tmp3,
  Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {

  Label L_carry, L_last_in, L_done;

  // carry = 0;
  // for (int j=len-1; j >= 0; j--) {
  //   long product = (in[j] & LONG_MASK) * kLong +
  //                  (out[offs] & LONG_MASK) + carry;
  //   out[offs--] = (int)product;
  //   carry = product >>> 32;
  // }
  //
  push(tmp1);
  push(tmp2);
  push(tmp3);
  push(tmp4);
  push(tmp5);

  Register op2 = tmp2;
  const Register sum = tmp3;
  const Register op1 = tmp4;
  const Register carry = tmp5;

  if (UseBMI2Instructions) {
    op2 = rdxReg;
  }
  movl(op2, k);

  xorq(carry, carry);

  // First loop

  // Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply
  // The carry is in tmp5
  mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg);

  // Multiply the trailing in[] entry using 64 bit by 32 bit, if any
  decrementl(len);
  jccb(Assembler::negative, L_carry);
  decrementl(len);
  jccb(Assembler::negative, L_last_in);

  movq(op1, Address(in, len, Address::times_4, 0));
  rorq(op1, 32);

  subl(offs, 2);
  movq(sum, Address(out, offs, Address::times_4, 0));
  rorq(sum, 32);

  if (UseBMI2Instructions) {
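    // op2 was redirected to rdx above; multiply_add_64_bmi2 uses mulx,
    // which reads its second source operand implicitly from rdx.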
    multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
  }
  else {
    multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
  }

  // Store back in big endian from little endian
  rorq(sum, 0x20);
  movq(Address(out, offs, Address::times_4, 0), sum);

  testl(len, len);
  jccb(Assembler::zero, L_carry);

  // Multiply the last in[] entry, if any
  bind(L_last_in);
  movl(op1, Address(in, 0));
  movl(sum, Address(out, offs, Address::times_4, -4));

  movl(raxReg, k);
  mull(op1); // tmp4 * eax -> edx:eax
  addl(sum, carry);
  adcl(rdxReg, 0);
  addl(sum, raxReg);
  adcl(rdxReg, 0);
  movl(carry, rdxReg);

  movl(Address(out, offs, Address::times_4, -4), sum);

  bind(L_carry);
  // return tmp5/carry as carry in rax
  movl(rax, carry);

  bind(L_done);
  pop(tmp5);
  pop(tmp4);
  pop(tmp3);
  pop(tmp2);
  pop(tmp1);
}
#endif

/**
 * Emits code to update CRC-32 with a byte value according to constants in table
 *
 * @param [in,out]crc   Register containing the crc.
 * @param [in]val       Register containing the byte to fold into the CRC.
 * @param [in]table     Register containing the table of crc constants.
 *
 * uint32_t crc;
 * val = crc_table[(val ^ crc) & 0xFF];
 * crc = val ^ (crc >> 8);
 *
 */
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
  xorl(val, crc);
  andl(val, 0xFF);
  shrl(crc, 8); // unsigned shift
  xorl(crc, Address(table, val, Address::times_4, 0));
}

/**
 * Fold 128-bit data chunk
 */
void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
  if (UseAVX > 0) {
    vpclmulhdq(xtmp, xK, xcrc); // [123:64]
    vpclmulldq(xcrc, xK, xcrc); // [63:0]
    vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */);
    pxor(xcrc, xtmp);
  } else {
    movdqa(xtmp, xcrc);
    pclmulhdq(xtmp, xK); // [123:64]
    pclmulldq(xcrc, xK); // [63:0]
    pxor(xcrc, xtmp);
    movdqu(xtmp, Address(buf, offset));
    pxor(xcrc, xtmp);
  }
}

void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
  if (UseAVX > 0) {
    vpclmulhdq(xtmp, xK, xcrc);
    vpclmulldq(xcrc, xK, xcrc);
    pxor(xcrc, xbuf);
    pxor(xcrc, xtmp);
  } else {
    movdqa(xtmp, xcrc);
    pclmulhdq(xtmp, xK);
    pclmulldq(xcrc, xK);
    pxor(xcrc, xbuf);
    pxor(xcrc, xtmp);
  }
}

/**
 * 8-bit folds to compute 32-bit CRC
 *
 * uint64_t xcrc;
 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
 */
void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
  movdl(tmp, xcrc);
  andl(tmp, 0xFF);
  movdl(xtmp, Address(table, tmp, Address::times_4, 0));
  psrldq(xcrc, 1); // unsigned shift one byte
  pxor(xcrc, xtmp);
}

/**
 * uint32_t crc;
 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
 */
void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
  movl(tmp, crc);
  andl(tmp, 0xFF);
  shrl(crc, 8);
  xorl(crc, Address(table, tmp, Address::times_4, 0));
}

/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register that will contain address of CRC table
 * @param tmp   scratch register
 */
void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
  assert_different_registers(crc, buf, len, table, tmp, rax);

  Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
  Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;

  // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
  // context for the registers used, where all instructions below are using 128-bit mode
  // On EVEX without VL and BW, these instructions will all be AVX.
  lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
  notl(crc); // ~crc
  cmpl(len, 16);
  jcc(Assembler::less, L_tail);

  // Align buffer to 16 bytes
  movl(tmp, buf);
  andl(tmp, 0xF);
  jccb(Assembler::zero, L_aligned);
  subl(tmp, 16);
  addl(len, tmp);

  align(4);
  BIND(L_align_loop);
  movsbl(rax, Address(buf, 0)); // load byte with sign extension
  update_byte_crc32(crc, rax, table);
  increment(buf);
  incrementl(tmp);
  jccb(Assembler::less, L_align_loop);

  BIND(L_aligned);
  movl(tmp, len); // save
  shrl(len, 4);
  jcc(Assembler::zero, L_tail_restore);

  // Fold crc into first bytes of vector
  movdqa(xmm1, Address(buf, 0));
  movdl(rax, xmm1);
  xorl(crc, rax);
  if (VM_Version::supports_sse4_1()) {
    pinsrd(xmm1, crc, 0);
  } else {
    pinsrw(xmm1, crc, 0);
    shrl(crc, 16);
    pinsrw(xmm1, crc, 1);
  }
  addptr(buf, 16);
  subl(len, 4); // len > 0
  jcc(Assembler::less, L_fold_tail);

  movdqa(xmm2, Address(buf,  0));
  movdqa(xmm3, Address(buf, 16));
  movdqa(xmm4, Address(buf, 32));
  addptr(buf, 48);
  subl(len, 3);
  jcc(Assembler::lessEqual, L_fold_512b);

  // Fold total 512 bits of polynomial on each iteration,
  // 128 bits per each of 4 parallel streams.
  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1);

  align32();
  BIND(L_fold_512b_loop);
  fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
  fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
  fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
  fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
  addptr(buf, 64);
  subl(len, 4);
  jcc(Assembler::greater, L_fold_512b_loop);

  // Fold 512 bits to 128 bits.
  BIND(L_fold_512b);
  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);

  // Fold the rest of 128 bits data chunks
  BIND(L_fold_tail);
  addl(len, 3);
  jccb(Assembler::lessEqual, L_fold_128b);
  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);

  BIND(L_fold_tail_loop);
  fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
  addptr(buf, 16);
  decrementl(len);
  jccb(Assembler::greater, L_fold_tail_loop);

  // Fold 128 bits in xmm1 down into 32 bits in crc register.
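  // Roughly: two more carry-less multiplies against the low fold constants
  // shrink the folded remainder from 128 bits toward 32 bits, and the final
  // value is then finished with eight table-driven 8-bit folds below rather
  // than a Barrett reduction.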
  BIND(L_fold_128b);
  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1);
  if (UseAVX > 0) {
    vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
    vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
    vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
  } else {
    movdqa(xmm2, xmm0);
    pclmulqdq(xmm2, xmm1, 0x1);
    movdqa(xmm3, xmm0);
    pand(xmm3, xmm2);
    pclmulqdq(xmm0, xmm3, 0x1);
  }
  psrldq(xmm1, 8);
  psrldq(xmm2, 4);
  pxor(xmm0, xmm1);
  pxor(xmm0, xmm2);

  // 8 8-bit folds to compute 32-bit CRC.
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(xmm0, table, xmm1, rax);
  }
  movdl(crc, xmm0); // mov 32 bits to general register
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(crc, table, rax);
  }

  BIND(L_tail_restore);
  movl(len, tmp); // restore
  BIND(L_tail);
  andl(len, 0xf);
  jccb(Assembler::zero, L_exit);

  // Fold the rest of bytes
  align(4);
  BIND(L_tail_loop);
  movsbl(rax, Address(buf, 0)); // load byte with sign extension
  update_byte_crc32(crc, rax, table);
  increment(buf);
  decrementl(len);
  jccb(Assembler::greater, L_tail_loop);

  BIND(L_exit);
  notl(crc); // ~crc
}

#ifdef _LP64
// Helper function for AVX 512 CRC32
// Fold 512-bit data chunks
void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf,
                                             Register pos, int offset) {
  evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit);
  evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64]
  evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0]
  evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */);
  evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */);
}

// Helper function for AVX 512 CRC32
// Compute CRC32 for < 256B buffers
void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos,
                                              Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
                                              Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) {

  Label L_less_than_32, L_exact_16_left, L_less_than_16_left;
  Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left;
  Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2;

  // check if there is enough buffer to be able to fold 16B at a time
  cmpl(len, 32);
  jcc(Assembler::less, L_less_than_32);

  // if there is, load the constants
  movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10
  movdl(xmm0, crc);                      // get the initial crc value
  movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); // load the plaintext
  pxor(xmm7, xmm0);

  // update the buffer pointer
  addl(pos, 16);
  // update the counter: subtract 32 instead of 16 to save one instruction from the loop
  subl(len, 32);
  jmp(L_16B_reduction_loop);

  bind(L_less_than_32);
  // mov initial crc to the return value; this is necessary for zero-length buffers.
  movl(rax, crc);
  testl(len, len);
  jcc(Assembler::equal, L_cleanup);

  movdl(xmm0, crc); // get the initial crc value

  cmpl(len, 16);
  jcc(Assembler::equal, L_exact_16_left);
  jcc(Assembler::less, L_less_than_16_left);

  movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); // load the plaintext
  pxor(xmm7, xmm0); // xor the initial crc value
  addl(pos, 16);
  subl(len, 16);
  movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10
  jmp(L_get_last_two_xmms);

  bind(L_less_than_16_left);
  // use stack space to load data less than 16 bytes; zero out the 16B in memory first.
  pxor(xmm1, xmm1);
  movptr(tmp1, rsp);
  movdqu(Address(tmp1, 0 * 16), xmm1);

  cmpl(len, 4);
  jcc(Assembler::less, L_only_less_than_4);

  // backup the counter value
  movl(tmp2, len);
  cmpl(len, 8);
  jcc(Assembler::less, L_less_than_8_left);

  // load 8 Bytes
  movq(rax, Address(buf, pos, Address::times_1, 0 * 16));
  movq(Address(tmp1, 0 * 16), rax);
  addptr(tmp1, 8);
  subl(len, 8);
  addl(pos, 8);

  bind(L_less_than_8_left);
  cmpl(len, 4);
  jcc(Assembler::less, L_less_than_4_left);

  // load 4 Bytes
  movl(rax, Address(buf, pos, Address::times_1, 0));
  movl(Address(tmp1, 0 * 16), rax);
  addptr(tmp1, 4);
  subl(len, 4);
  addl(pos, 4);

  bind(L_less_than_4_left);
  cmpl(len, 2);
  jcc(Assembler::less, L_less_than_2_left);

  // load 2 Bytes
  movw(rax, Address(buf, pos, Address::times_1, 0));
  movl(Address(tmp1, 0 * 16), rax);
  addptr(tmp1, 2);
  subl(len, 2);
  addl(pos, 2);

  bind(L_less_than_2_left);
  cmpl(len, 1);
  jcc(Assembler::less, L_zero_left);

  // load 1 Byte
  movb(rax, Address(buf, pos, Address::times_1, 0));
  movb(Address(tmp1, 0 * 16), rax);

  bind(L_zero_left);
  movdqu(xmm7, Address(rsp, 0));
  pxor(xmm7, xmm0); // xor the initial crc value

  lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
  movdqu(xmm0, Address(rax, tmp2));
  pshufb(xmm7, xmm0);
  jmp(L_128_done);

  bind(L_exact_16_left);
  movdqu(xmm7, Address(buf, pos, Address::times_1, 0));
  pxor(xmm7, xmm0); // xor the initial crc value
  jmp(L_128_done);

  bind(L_only_less_than_4);
  cmpl(len, 3);
  jcc(Assembler::less, L_only_less_than_3);

  // load 3 Bytes
  movb(rax, Address(buf, pos, Address::times_1, 0));
  movb(Address(tmp1, 0), rax);

  movb(rax, Address(buf, pos, Address::times_1, 1));
  movb(Address(tmp1, 1), rax);

  movb(rax, Address(buf, pos, Address::times_1, 2));
  movb(Address(tmp1, 2), rax);

  movdqu(xmm7, Address(rsp, 0));
  pxor(xmm7, xmm0); // xor the initial crc value

  pslldq(xmm7, 0x5);
  jmp(L_barrett);
  bind(L_only_less_than_3);
  cmpl(len, 2);
  jcc(Assembler::less, L_only_less_than_2);

  // load 2 Bytes
  movb(rax, Address(buf, pos, Address::times_1, 0));
  movb(Address(tmp1, 0), rax);

  movb(rax, Address(buf, pos, Address::times_1, 1));
  movb(Address(tmp1, 1), rax);

  movdqu(xmm7, Address(rsp, 0));
  pxor(xmm7, xmm0); // xor the initial crc value

  pslldq(xmm7, 0x6);
  jmp(L_barrett);

  bind(L_only_less_than_2);
  // load 1 Byte
  movb(rax, Address(buf, pos, Address::times_1, 0));
  movb(Address(tmp1, 0), rax);

  movdqu(xmm7, Address(rsp, 0));
0)); 8673 pxor(xmm7, xmm0); //xor the initial crc value 8674 8675 pslldq(xmm7, 0x7); 8676 } 8677 8678 /** 8679 * Compute CRC32 using AVX512 instructions 8680 * param crc register containing existing CRC (32-bit) 8681 * param buf register pointing to input byte buffer (byte*) 8682 * param len register containing number of bytes 8683 * param table address of crc or crc32c table 8684 * param tmp1 scratch register 8685 * param tmp2 scratch register 8686 * return rax result register 8687 * 8688 * This routine is identical for crc32c with the exception of the precomputed constant 8689 * table which will be passed as the table argument. The calculation steps are 8690 * the same for both variants. 8691 */ 8692 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) { 8693 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12); 8694 8695 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 8696 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 8697 Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop; 8698 Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop; 8699 Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup; 8700 8701 const Register pos = r12; 8702 push(r12); 8703 subptr(rsp, 16 * 2 + 8); 8704 8705 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 8706 // context for the registers used, where all instructions below are using 128-bit mode 8707 // On EVEX without VL and BW, these instructions will all be AVX. 8708 movl(pos, 0); 8709 8710 // check if smaller than 256B 8711 cmpl(len, 256); 8712 jcc(Assembler::less, L_less_than_256); 8713 8714 // load the initial crc value 8715 movdl(xmm10, crc); 8716 8717 // receive the initial 64B data, xor the initial crc value 8718 evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit); 8719 evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit); 8720 evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit); 8721 evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4 8722 8723 subl(len, 256); 8724 cmpl(len, 256); 8725 jcc(Assembler::less, L_fold_128_B_loop); 8726 8727 evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit); 8728 evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit); 8729 evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2 8730 subl(len, 256); 8731 8732 bind(L_fold_256_B_loop); 8733 addl(pos, 256); 8734 fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64); 8735 fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64); 8736 fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64); 8737 fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64); 8738 8739 subl(len, 256); 8740 jcc(Assembler::greaterEqual, L_fold_256_B_loop); 8741 8742 // Fold 256 into 128 8743 addl(pos, 256); 8744 evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit); 8745 evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit); 8746 vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC 8747 8748 evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit); 8749 evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit); 8750 vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC 8751 8752 evmovdquq(xmm0, xmm7, 
Assembler::AVX_512bit); 8753 evmovdquq(xmm4, xmm8, Assembler::AVX_512bit); 8754 8755 addl(len, 128); 8756 jmp(L_fold_128_B_register); 8757 8758 // at this point in the code there are 128 * x + y (0 <= y < 128) bytes of buffer. The fold_128_B_loop 8759 // loop will fold 128B at a time until we have 128 + y bytes of buffer 8760 8761 // fold 128B at a time. This section of the code folds 8 xmm registers in parallel 8762 bind(L_fold_128_B_loop); 8763 addl(pos, 128); 8764 fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64); 8765 fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64); 8766 8767 subl(len, 128); 8768 jcc(Assembler::greaterEqual, L_fold_128_B_loop); 8769 8770 addl(pos, 128); 8771 8772 // at this point, the buffer pointer is pointing at the last y bytes of the buffer, where 0 <= y < 128 8773 // the 128B of folded data is in 8 of the xmm registers: xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 8774 bind(L_fold_128_B_register); 8775 evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16 8776 evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0 8777 evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit); 8778 evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit); 8779 // save last that has no multiplicand 8780 vextracti64x2(xmm7, xmm4, 3); 8781 8782 evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit); 8783 evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit); 8784 // Needed later in reduction loop 8785 movdqu(xmm10, Address(table, 1 * 16)); 8786 vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC 8787 vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC 8788 8789 // Swap 1,0,3,2 - 01 00 11 10 8790 evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit); 8791 evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit); 8792 vextracti128(xmm5, xmm8, 1); 8793 evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit); 8794 8795 // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop 8796 // instead of a cmp instruction, we use the negative flag with the jl instruction 8797 addl(len, 128 - 16); 8798 jcc(Assembler::less, L_final_reduction_for_128); 8799 8800 bind(L_16B_reduction_loop); 8801 vpclmulqdq(xmm8, xmm7, xmm10, 0x01); 8802 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 8803 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 8804 movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16)); 8805 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8806 addl(pos, 16); 8807 subl(len, 16); 8808 jcc(Assembler::greaterEqual, L_16B_reduction_loop); 8809 8810 bind(L_final_reduction_for_128); 8811 addl(len, 16); 8812 jcc(Assembler::equal, L_128_done); 8813 8814 bind(L_get_last_two_xmms); 8815 movdqu(xmm2, xmm7); 8816 addl(pos, len); 8817 movdqu(xmm1, Address(buf, pos, Address::times_1, -16)); 8818 subl(pos, len); 8819 8820 // get rid of the extra data that was loaded before 8821 // load the shift constant 8822 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr())); 8823 movdqu(xmm0, Address(rax, len)); 8824 addl(rax, len); 8825 8826 vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8827 // Change mask to 512 8828 vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2); 8829 vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit); 8830 8831 blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit); 8832 vpclmulqdq(xmm8, xmm7, xmm10, 0x01); 8833 vpclmulqdq(xmm7,
xmm7, xmm10, 0x10); 8834 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 8835 vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit); 8836 8837 bind(L_128_done); 8838 // compute crc of a 128-bit value 8839 movdqu(xmm10, Address(table, 3 * 16)); 8840 movdqu(xmm0, xmm7); 8841 8842 // 64b fold 8843 vpclmulqdq(xmm7, xmm7, xmm10, 0x0); 8844 vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit); 8845 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8846 8847 // 32b fold 8848 movdqu(xmm0, xmm7); 8849 vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit); 8850 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 8851 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8852 jmp(L_barrett); 8853 8854 bind(L_less_than_256); 8855 kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup); 8856 8857 //barrett reduction 8858 bind(L_barrett); 8859 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2); 8860 movdqu(xmm1, xmm7); 8861 movdqu(xmm2, xmm7); 8862 movdqu(xmm10, Address(table, 4 * 16)); 8863 8864 pclmulqdq(xmm7, xmm10, 0x0); 8865 pxor(xmm7, xmm2); 8866 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2); 8867 movdqu(xmm2, xmm7); 8868 pclmulqdq(xmm7, xmm10, 0x10); 8869 pxor(xmm7, xmm2); 8870 pxor(xmm7, xmm1); 8871 pextrd(crc, xmm7, 2); 8872 8873 bind(L_cleanup); 8874 addptr(rsp, 16 * 2 + 8); 8875 pop(r12); 8876 } 8877 8878 // S. Gueron / Information Processing Letters 112 (2012) 184 8879 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table. 8880 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0]. 8881 // Output: the 64-bit carry-less product of B * CONST 8882 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n, 8883 Register tmp1, Register tmp2, Register tmp3) { 8884 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 8885 if (n > 0) { 8886 addq(tmp3, n * 256 * 8); 8887 } 8888 // Q1 = TABLEExt[n][B & 0xFF]; 8889 movl(tmp1, in); 8890 andl(tmp1, 0x000000FF); 8891 shll(tmp1, 3); 8892 addq(tmp1, tmp3); 8893 movq(tmp1, Address(tmp1, 0)); 8894 8895 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 8896 movl(tmp2, in); 8897 shrl(tmp2, 8); 8898 andl(tmp2, 0x000000FF); 8899 shll(tmp2, 3); 8900 addq(tmp2, tmp3); 8901 movq(tmp2, Address(tmp2, 0)); 8902 8903 shlq(tmp2, 8); 8904 xorq(tmp1, tmp2); 8905 8906 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 8907 movl(tmp2, in); 8908 shrl(tmp2, 16); 8909 andl(tmp2, 0x000000FF); 8910 shll(tmp2, 3); 8911 addq(tmp2, tmp3); 8912 movq(tmp2, Address(tmp2, 0)); 8913 8914 shlq(tmp2, 16); 8915 xorq(tmp1, tmp2); 8916 8917 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 8918 shrl(in, 24); 8919 andl(in, 0x000000FF); 8920 shll(in, 3); 8921 addq(in, tmp3); 8922 movq(in, Address(in, 0)); 8923 8924 shlq(in, 24); 8925 xorq(in, tmp1); 8926 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 8927 } 8928 8929 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 8930 Register in_out, 8931 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 8932 XMMRegister w_xtmp2, 8933 Register tmp1, 8934 Register n_tmp2, Register n_tmp3) { 8935 if (is_pclmulqdq_supported) { 8936 movdl(w_xtmp1, in_out); // modified blindly 8937 8938 movl(tmp1, const_or_pre_comp_const_index); 8939 movdl(w_xtmp2, tmp1); 8940 pclmulqdq(w_xtmp1, w_xtmp2, 0); 8941 8942 movdq(in_out, w_xtmp1); 8943 } else { 8944 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3); 8945 } 8946 } 8947 8948 // 
Recombination Alternative 2: No bit-reflections 8949 // T1 = (CRC_A * U1) << 1 8950 // T2 = (CRC_B * U2) << 1 8951 // C1 = T1 >> 32 8952 // C2 = T2 >> 32 8953 // T1 = T1 & 0xFFFFFFFF 8954 // T2 = T2 & 0xFFFFFFFF 8955 // T1 = CRC32(0, T1) 8956 // T2 = CRC32(0, T2) 8957 // C1 = C1 ^ T1 8958 // C2 = C2 ^ T2 8959 // CRC = C1 ^ C2 ^ CRC_C 8960 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 8961 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8962 Register tmp1, Register tmp2, 8963 Register n_tmp3) { 8964 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8965 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8966 shlq(in_out, 1); 8967 movl(tmp1, in_out); 8968 shrq(in_out, 32); 8969 xorl(tmp2, tmp2); 8970 crc32(tmp2, tmp1, 4); 8971 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here 8972 shlq(in1, 1); 8973 movl(tmp1, in1); 8974 shrq(in1, 32); 8975 xorl(tmp2, tmp2); 8976 crc32(tmp2, tmp1, 4); 8977 xorl(in1, tmp2); 8978 xorl(in_out, in1); 8979 xorl(in_out, in2); 8980 } 8981 8982 // Set N to predefined value 8983 // Subtract from a length of a buffer 8984 // execute in a loop: 8985 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0 8986 // for i = 1 to N do 8987 // CRC_A = CRC32(CRC_A, A[i]) 8988 // CRC_B = CRC32(CRC_B, B[i]) 8989 // CRC_C = CRC32(CRC_C, C[i]) 8990 // end for 8991 // Recombine 8992 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 8993 Register in_out1, Register in_out2, Register in_out3, 8994 Register tmp1, Register tmp2, Register tmp3, 8995 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8996 Register tmp4, Register tmp5, 8997 Register n_tmp6) { 8998 Label L_processPartitions; 8999 Label L_processPartition; 9000 Label L_exit; 9001 9002 bind(L_processPartitions); 9003 cmpl(in_out1, 3 * size); 9004 jcc(Assembler::less, L_exit); 9005 xorl(tmp1, tmp1); 9006 xorl(tmp2, tmp2); 9007 movq(tmp3, in_out2); 9008 addq(tmp3, size); 9009 9010 bind(L_processPartition); 9011 crc32(in_out3, Address(in_out2, 0), 8); 9012 crc32(tmp1, Address(in_out2, size), 8); 9013 crc32(tmp2, Address(in_out2, size * 2), 8); 9014 addq(in_out2, 8); 9015 cmpq(in_out2, tmp3); 9016 jcc(Assembler::less, L_processPartition); 9017 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 9018 w_xtmp1, w_xtmp2, w_xtmp3, 9019 tmp4, tmp5, 9020 n_tmp6); 9021 addq(in_out2, 2 * size); 9022 subl(in_out1, 3 * size); 9023 jmp(L_processPartitions); 9024 9025 bind(L_exit); 9026 } 9027 #else 9028 void MacroAssembler::crc32c_ipl_alg4(Register in_out, uint32_t n, 9029 Register tmp1, Register tmp2, Register tmp3, 9030 XMMRegister xtmp1, XMMRegister xtmp2) { 9031 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 9032 if (n > 0) { 9033 addl(tmp3, n * 256 * 8); 9034 } 9035 // Q1 = TABLEExt[n][B & 0xFF]; 9036 movl(tmp1, in_out); 9037 andl(tmp1, 0x000000FF); 9038 shll(tmp1, 3); 9039 addl(tmp1, tmp3); 9040 movq(xtmp1, Address(tmp1, 0)); 9041 9042 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 9043 movl(tmp2, in_out); 9044 shrl(tmp2, 8); 9045 andl(tmp2, 0x000000FF); 9046 shll(tmp2, 3); 9047 addl(tmp2, tmp3); 9048 movq(xtmp2, 
Address(tmp2, 0)); 9049 9050 psllq(xtmp2, 8); 9051 pxor(xtmp1, xtmp2); 9052 9053 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 9054 movl(tmp2, in_out); 9055 shrl(tmp2, 16); 9056 andl(tmp2, 0x000000FF); 9057 shll(tmp2, 3); 9058 addl(tmp2, tmp3); 9059 movq(xtmp2, Address(tmp2, 0)); 9060 9061 psllq(xtmp2, 16); 9062 pxor(xtmp1, xtmp2); 9063 9064 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 9065 shrl(in_out, 24); 9066 andl(in_out, 0x000000FF); 9067 shll(in_out, 3); 9068 addl(in_out, tmp3); 9069 movq(xtmp2, Address(in_out, 0)); 9070 9071 psllq(xtmp2, 24); 9072 pxor(xtmp1, xtmp2); // Result in CXMM 9073 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 9074 } 9075 9076 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 9077 Register in_out, 9078 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 9079 XMMRegister w_xtmp2, 9080 Register tmp1, 9081 Register n_tmp2, Register n_tmp3) { 9082 if (is_pclmulqdq_supported) { 9083 movdl(w_xtmp1, in_out); 9084 9085 movl(tmp1, const_or_pre_comp_const_index); 9086 movdl(w_xtmp2, tmp1); 9087 pclmulqdq(w_xtmp1, w_xtmp2, 0); 9088 // Keep result in XMM since GPR is 32 bit in length 9089 } else { 9090 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3, w_xtmp1, w_xtmp2); 9091 } 9092 } 9093 9094 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 9095 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9096 Register tmp1, Register tmp2, 9097 Register n_tmp3) { 9098 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 9099 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 9100 9101 psllq(w_xtmp1, 1); 9102 movdl(tmp1, w_xtmp1); 9103 psrlq(w_xtmp1, 32); 9104 movdl(in_out, w_xtmp1); 9105 9106 xorl(tmp2, tmp2); 9107 crc32(tmp2, tmp1, 4); 9108 xorl(in_out, tmp2); 9109 9110 psllq(w_xtmp2, 1); 9111 movdl(tmp1, w_xtmp2); 9112 psrlq(w_xtmp2, 32); 9113 movdl(in1, w_xtmp2); 9114 9115 xorl(tmp2, tmp2); 9116 crc32(tmp2, tmp1, 4); 9117 xorl(in1, tmp2); 9118 xorl(in_out, in1); 9119 xorl(in_out, in2); 9120 } 9121 9122 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 9123 Register in_out1, Register in_out2, Register in_out3, 9124 Register tmp1, Register tmp2, Register tmp3, 9125 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9126 Register tmp4, Register tmp5, 9127 Register n_tmp6) { 9128 Label L_processPartitions; 9129 Label L_processPartition; 9130 Label L_exit; 9131 9132 bind(L_processPartitions); 9133 cmpl(in_out1, 3 * size); 9134 jcc(Assembler::less, L_exit); 9135 xorl(tmp1, tmp1); 9136 xorl(tmp2, tmp2); 9137 movl(tmp3, in_out2); 9138 addl(tmp3, size); 9139 9140 bind(L_processPartition); 9141 crc32(in_out3, Address(in_out2, 0), 4); 9142 crc32(tmp1, Address(in_out2, size), 4); 9143 crc32(tmp2, Address(in_out2, size*2), 4); 9144 crc32(in_out3, Address(in_out2, 0+4), 4); 9145 crc32(tmp1, Address(in_out2, size+4), 4); 9146 crc32(tmp2, Address(in_out2, size*2+4), 4); 9147 addl(in_out2, 8); 9148 cmpl(in_out2, tmp3); 9149 jcc(Assembler::less, L_processPartition); 9150 9151 push(tmp3); 9152 push(in_out1); 9153 push(in_out2); 9154 tmp4 = tmp3; 9155 tmp5 = in_out1; 9156 n_tmp6 = in_out2; 9157 9158 
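// Illustrative sketch (not VM code) of the register reuse here: on 32-bit
// there are not enough spare registers, so the pushes just above preserve
// the live values, their registers serve as temporaries for the
// recombination call below, and the pops afterwards restore them:
//
//   push(a); push(b); push(c);   // preserve the live values
//   recombine(...);              // a, b, c reused as scratch inside
//   pop(c); pop(b); pop(a);      // restore in reverse order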
crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 9159 w_xtmp1, w_xtmp2, w_xtmp3, 9160 tmp4, tmp5, 9161 n_tmp6); 9162 9163 pop(in_out2); 9164 pop(in_out1); 9165 pop(tmp3); 9166 9167 addl(in_out2, 2 * size); 9168 subl(in_out1, 3 * size); 9169 jmp(L_processPartitions); 9170 9171 bind(L_exit); 9172 } 9173 #endif //LP64 9174 9175 #ifdef _LP64 9176 // Algorithm 2: Pipelined usage of the CRC32 instruction. 9177 // Input: A buffer I of L bytes. 9178 // Output: the CRC32C value of the buffer. 9179 // Notations: 9180 // Write L = 24N + r, with N = floor (L/24). 9181 // r = L mod 24 (0 <= r < 24). 9182 // Consider I as the concatenation of A|B|C|R, where A, B, C, each, 9183 // N quadwords, and R consists of r bytes. 9184 // A[j] = I [8j+7:8j], j= 0, 1, ..., N-1 9185 // B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1 9186 // C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1 9187 // if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1 9188 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 9189 Register tmp1, Register tmp2, Register tmp3, 9190 Register tmp4, Register tmp5, Register tmp6, 9191 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9192 bool is_pclmulqdq_supported) { 9193 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 9194 Label L_wordByWord; 9195 Label L_byteByByteProlog; 9196 Label L_byteByByte; 9197 Label L_exit; 9198 9199 if (is_pclmulqdq_supported ) { 9200 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 9201 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr+1); 9202 9203 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 9204 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 9205 9206 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 9207 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 9208 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\""); 9209 } else { 9210 const_or_pre_comp_const_index[0] = 1; 9211 const_or_pre_comp_const_index[1] = 0; 9212 9213 const_or_pre_comp_const_index[2] = 3; 9214 const_or_pre_comp_const_index[3] = 2; 9215 9216 const_or_pre_comp_const_index[4] = 5; 9217 const_or_pre_comp_const_index[5] = 4; 9218 } 9219 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 9220 in2, in1, in_out, 9221 tmp1, tmp2, tmp3, 9222 w_xtmp1, w_xtmp2, w_xtmp3, 9223 tmp4, tmp5, 9224 tmp6); 9225 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 9226 in2, in1, in_out, 9227 tmp1, tmp2, tmp3, 9228 w_xtmp1, w_xtmp2, w_xtmp3, 9229 tmp4, tmp5, 9230 tmp6); 9231 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 9232 in2, in1, in_out, 9233 tmp1, tmp2, tmp3, 9234 w_xtmp1, w_xtmp2, w_xtmp3, 9235 tmp4, tmp5, 9236 tmp6); 9237 movl(tmp1, in2); 9238 andl(tmp1, 0x00000007); 9239 negl(tmp1); 9240 addl(tmp1, in2); 9241 addq(tmp1, in1); 9242 9243 cmpq(in1, tmp1); 9244 jccb(Assembler::greaterEqual, L_byteByByteProlog); 9245 align(16); 9246 BIND(L_wordByWord); 9247 crc32(in_out, Address(in1, 0), 8); 9248 addq(in1, 8); 9249 cmpq(in1, tmp1); 9250 
jcc(Assembler::less, L_wordByWord); 9251 9252 BIND(L_byteByByteProlog); 9253 andl(in2, 0x00000007); 9254 movl(tmp2, 1); 9255 9256 cmpl(tmp2, in2); 9257 jccb(Assembler::greater, L_exit); 9258 BIND(L_byteByByte); 9259 crc32(in_out, Address(in1, 0), 1); 9260 incq(in1); 9261 incl(tmp2); 9262 cmpl(tmp2, in2); 9263 jcc(Assembler::lessEqual, L_byteByByte); 9264 9265 BIND(L_exit); 9266 } 9267 #else 9268 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 9269 Register tmp1, Register tmp2, Register tmp3, 9270 Register tmp4, Register tmp5, Register tmp6, 9271 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9272 bool is_pclmulqdq_supported) { 9273 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 9274 Label L_wordByWord; 9275 Label L_byteByByteProlog; 9276 Label L_byteByByte; 9277 Label L_exit; 9278 9279 if (is_pclmulqdq_supported) { 9280 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 9281 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1); 9282 9283 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 9284 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 9285 9286 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 9287 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 9288 } else { 9289 const_or_pre_comp_const_index[0] = 1; 9290 const_or_pre_comp_const_index[1] = 0; 9291 9292 const_or_pre_comp_const_index[2] = 3; 9293 const_or_pre_comp_const_index[3] = 2; 9294 9295 const_or_pre_comp_const_index[4] = 5; 9296 const_or_pre_comp_const_index[5] = 4; 9297 } 9298 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 9299 in2, in1, in_out, 9300 tmp1, tmp2, tmp3, 9301 w_xtmp1, w_xtmp2, w_xtmp3, 9302 tmp4, tmp5, 9303 tmp6); 9304 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 9305 in2, in1, in_out, 9306 tmp1, tmp2, tmp3, 9307 w_xtmp1, w_xtmp2, w_xtmp3, 9308 tmp4, tmp5, 9309 tmp6); 9310 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 9311 in2, in1, in_out, 9312 tmp1, tmp2, tmp3, 9313 w_xtmp1, w_xtmp2, w_xtmp3, 9314 tmp4, tmp5, 9315 tmp6); 9316 movl(tmp1, in2); 9317 andl(tmp1, 0x00000007); 9318 negl(tmp1); 9319 addl(tmp1, in2); 9320 addl(tmp1, in1); 9321 9322 BIND(L_wordByWord); 9323 cmpl(in1, tmp1); 9324 jcc(Assembler::greaterEqual, L_byteByByteProlog); 9325 crc32(in_out, Address(in1,0), 4); 9326 addl(in1, 4); 9327 jmp(L_wordByWord); 9328 9329 BIND(L_byteByByteProlog); 9330 andl(in2, 0x00000007); 9331 movl(tmp2, 1); 9332 9333 BIND(L_byteByByte); 9334 cmpl(tmp2, in2); 9335 jccb(Assembler::greater, L_exit); 9336 movb(tmp1, Address(in1, 0)); 9337 crc32(in_out, tmp1, 1); 9338 incl(in1); 9339 incl(tmp2); 9340 jmp(L_byteByByte); 9341 9342 BIND(L_exit); 9343 } 9344 #endif // LP64 9345 #undef BIND 9346 #undef BLOCK_COMMENT 9347 9348 // Compress char[] array to byte[]. 
9349 // ..\jdk\src\java.base\share\classes\java\lang\StringUTF16.java 9350 // @IntrinsicCandidate 9351 // private static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) { 9352 // for (int i = 0; i < len; i++) { 9353 // int c = src[srcOff++]; 9354 // if (c >>> 8 != 0) { 9355 // return 0; 9356 // } 9357 // dst[dstOff++] = (byte)c; 9358 // } 9359 // return len; 9360 // } 9361 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 9362 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 9363 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 9364 Register tmp5, Register result, KRegister mask1, KRegister mask2) { 9365 Label copy_chars_loop, return_length, return_zero, done; 9366 9367 // rsi: src 9368 // rdi: dst 9369 // rdx: len 9370 // rcx: tmp5 9371 // rax: result 9372 9373 // rsi holds start addr of source char[] to be compressed 9374 // rdi holds start addr of destination byte[] 9375 // rdx holds length 9376 9377 assert(len != result, ""); 9378 9379 // save length for return 9380 push(len); 9381 9382 if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512 9383 VM_Version::supports_avx512vlbw() && 9384 VM_Version::supports_bmi2()) { 9385 9386 Label copy_32_loop, copy_loop_tail, below_threshold; 9387 9388 // alignment 9389 Label post_alignment; 9390 9391 // if the length of the string is less than 32, handle it the old-fashioned way 9392 testl(len, -32); 9393 jcc(Assembler::zero, below_threshold); 9394 9395 // First check whether a character is compressible (<= 0xFF). 9396 // Create mask to test for Unicode chars inside zmm vector 9397 movl(result, 0x00FF); 9398 evpbroadcastw(tmp2Reg, result, Assembler::AVX_512bit); 9399 9400 testl(len, -64); 9401 jcc(Assembler::zero, post_alignment); 9402 9403 movl(tmp5, dst); 9404 andl(tmp5, (32 - 1)); 9405 negl(tmp5); 9406 andl(tmp5, (32 - 1)); 9407 9408 // bail out when there is nothing to be done 9409 testl(tmp5, 0xFFFFFFFF); 9410 jcc(Assembler::zero, post_alignment); 9411 9412 // ~(~0 << len), where len is the # of remaining elements to process 9413 movl(result, 0xFFFFFFFF); 9414 shlxl(result, result, tmp5); 9415 notl(result); 9416 kmovdl(mask2, result); 9417 9418 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 9419 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 9420 ktestd(mask1, mask2); 9421 jcc(Assembler::carryClear, return_zero); 9422 9423 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 9424 9425 addptr(src, tmp5); 9426 addptr(src, tmp5); 9427 addptr(dst, tmp5); 9428 subl(len, tmp5); 9429 9430 bind(post_alignment); 9431 // end of alignment 9432 9433 movl(tmp5, len); 9434 andl(tmp5, (32 - 1)); // tail count (in chars) 9435 andl(len, ~(32 - 1)); // vector count (in chars) 9436 jcc(Assembler::zero, copy_loop_tail); 9437 9438 lea(src, Address(src, len, Address::times_2)); 9439 lea(dst, Address(dst, len, Address::times_1)); 9440 negptr(len); 9441 9442 bind(copy_32_loop); 9443 evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit); 9444 evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit); 9445 kortestdl(mask1, mask1); 9446 jcc(Assembler::carryClear, return_zero); 9447 9448 // All elements in the current processed chunk are valid candidates for 9449 // compression. Write truncated byte elements to memory.
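// Illustrative note (not VM code): evpmovwb narrows each 16-bit lane to its
// low byte, which is lossless here because every lane was just verified to
// be <= 0xFF. Per lane this is simply:
//
//   uint16_t c = 0x0041;      // 'A', known to fit in one byte
//   uint8_t  b = (uint8_t)c;  // 0x41 is what reaches the byte[] destination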
9450 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit); 9451 addptr(len, 32); 9452 jcc(Assembler::notZero, copy_32_loop); 9453 9454 bind(copy_loop_tail); 9455 // bail out when there is nothing to be done 9456 testl(tmp5, 0xFFFFFFFF); 9457 jcc(Assembler::zero, return_length); 9458 9459 movl(len, tmp5); 9460 9461 // ~(~0 << len), where len is the # of remaining elements to process 9462 movl(result, 0xFFFFFFFF); 9463 shlxl(result, result, len); 9464 notl(result); 9465 9466 kmovdl(mask2, result); 9467 9468 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 9469 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 9470 ktestd(mask1, mask2); 9471 jcc(Assembler::carryClear, return_zero); 9472 9473 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 9474 jmp(return_length); 9475 9476 bind(below_threshold); 9477 } 9478 9479 if (UseSSE42Intrinsics) { 9480 Label copy_32_loop, copy_16, copy_tail; 9481 9482 movl(result, len); 9483 9484 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors 9485 9486 // vectored compression 9487 andl(len, 0xfffffff0); // vector count (in chars) 9488 andl(result, 0x0000000f); // tail count (in chars) 9489 testl(len, len); 9490 jcc(Assembler::zero, copy_16); 9491 9492 // compress 16 chars per iter 9493 movdl(tmp1Reg, tmp5); 9494 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 9495 pxor(tmp4Reg, tmp4Reg); 9496 9497 lea(src, Address(src, len, Address::times_2)); 9498 lea(dst, Address(dst, len, Address::times_1)); 9499 negptr(len); 9500 9501 bind(copy_32_loop); 9502 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters 9503 por(tmp4Reg, tmp2Reg); 9504 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters 9505 por(tmp4Reg, tmp3Reg); 9506 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector 9507 jcc(Assembler::notZero, return_zero); 9508 packuswb(tmp2Reg, tmp3Reg); // only ASCII chars; compress each to 1 byte 9509 movdqu(Address(dst, len, Address::times_1), tmp2Reg); 9510 addptr(len, 16); 9511 jcc(Assembler::notZero, copy_32_loop); 9512 9513 // compress next vector of 8 chars (if any) 9514 bind(copy_16); 9515 movl(len, result); 9516 andl(len, 0xfffffff8); // vector count (in chars) 9517 andl(result, 0x00000007); // tail count (in chars) 9518 testl(len, len); 9519 jccb(Assembler::zero, copy_tail); 9520 9521 movdl(tmp1Reg, tmp5); 9522 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 9523 pxor(tmp3Reg, tmp3Reg); 9524 9525 movdqu(tmp2Reg, Address(src, 0)); 9526 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector 9527 jccb(Assembler::notZero, return_zero); 9528 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte 9529 movq(Address(dst, 0), tmp2Reg); 9530 addptr(src, 16); 9531 addptr(dst, 8); 9532 9533 bind(copy_tail); 9534 movl(len, result); 9535 } 9536 // compress 1 char per iter 9537 testl(len, len); 9538 jccb(Assembler::zero, return_length); 9539 lea(src, Address(src, len, Address::times_2)); 9540 lea(dst, Address(dst, len, Address::times_1)); 9541 negptr(len); 9542 9543 bind(copy_chars_loop); 9544 load_unsigned_short(result, Address(src, len, Address::times_2)); 9545 testl(result, 0xff00); // check if Unicode char 9546 jccb(Assembler::notZero, return_zero); 9547 movb(Address(dst, len, Address::times_1), result); // ASCII char; compress to 1 byte 9548 increment(len); 9549 jcc(Assembler::notZero, copy_chars_loop); 
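// The scalar fallback loop just above follows the quoted Java reference one
// char at a time; roughly, in C (illustrative, not VM code), with src/dst
// already advanced past the tail and len negated:
//
//   for (ptrdiff_t i = -tail; i != 0; i++) {
//     uint16_t c = src16[i];
//     if (c & 0xFF00) return 0;  // non-latin1 char: report failure
//     dst8[i] = (uint8_t)c;
//   }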
9550 9551 // if compression succeeded, return length 9552 bind(return_length); 9553 pop(result); 9554 jmpb(done); 9555 9556 // if compression failed, return 0 9557 bind(return_zero); 9558 xorl(result, result); 9559 addptr(rsp, wordSize); 9560 9561 bind(done); 9562 } 9563 9564 // Inflate byte[] array to char[]. 9565 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java 9566 // @IntrinsicCandidate 9567 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) { 9568 // for (int i = 0; i < len; i++) { 9569 // dst[dstOff++] = (char)(src[srcOff++] & 0xff); 9570 // } 9571 // } 9572 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 9573 XMMRegister tmp1, Register tmp2, KRegister mask) { 9574 Label copy_chars_loop, done, below_threshold, avx3_threshold; 9575 // rsi: src 9576 // rdi: dst 9577 // rdx: len 9578 // rcx: tmp2 9579 9580 // rsi holds start addr of source byte[] to be inflated 9581 // rdi holds start addr of destination char[] 9582 // rdx holds length 9583 assert_different_registers(src, dst, len, tmp2); 9584 movl(tmp2, len); 9585 if ((UseAVX > 2) && // AVX512 9586 VM_Version::supports_avx512vlbw() && 9587 VM_Version::supports_bmi2()) { 9588 9589 Label copy_32_loop, copy_tail; 9590 Register tmp3_aliased = len; 9591 9592 // if the length of the string is less than 16, handle it the old-fashioned way 9593 testl(len, -16); 9594 jcc(Assembler::zero, below_threshold); 9595 9596 testl(len, -1 * AVX3Threshold); 9597 jcc(Assembler::zero, avx3_threshold); 9598 9599 // In order to use only one arithmetic operation in the main loop, we do 9600 // this pre-calculation 9601 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop 9602 andl(len, -32); // vector count 9603 jccb(Assembler::zero, copy_tail); 9604 9605 lea(src, Address(src, len, Address::times_1)); 9606 lea(dst, Address(dst, len, Address::times_2)); 9607 negptr(len); 9608 9609 9610 // inflate 32 chars per iter 9611 bind(copy_32_loop); 9612 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit); 9613 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit); 9614 addptr(len, 32); 9615 jcc(Assembler::notZero, copy_32_loop); 9616 9617 bind(copy_tail); 9618 // bail out when there is nothing to be done 9619 testl(tmp2, -1); // we don't destroy the contents of tmp2 here 9620 jcc(Assembler::zero, done); 9621 9622 // ~(~0 << length), where length is the # of remaining elements to process 9623 movl(tmp3_aliased, -1); 9624 shlxl(tmp3_aliased, tmp3_aliased, tmp2); 9625 notl(tmp3_aliased); 9626 kmovdl(mask, tmp3_aliased); 9627 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit); 9628 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit); 9629 9630 jmp(done); 9631 bind(avx3_threshold); 9632 } 9633 if (UseSSE42Intrinsics) { 9634 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail; 9635 9636 if (UseAVX > 1) { 9637 andl(tmp2, (16 - 1)); 9638 andl(len, -16); 9639 jccb(Assembler::zero, copy_new_tail); 9640 } else { 9641 andl(tmp2, 0x00000007); // tail count (in chars) 9642 andl(len, 0xfffffff8); // vector count (in chars) 9643 jccb(Assembler::zero, copy_tail); 9644 } 9645 9646 // vectored inflation 9647 lea(src, Address(src, len, Address::times_1)); 9648 lea(dst, Address(dst, len, Address::times_2)); 9649 negptr(len); 9650 9651 if (UseAVX > 1) { 9652 bind(copy_16_loop); 9653 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit); 9654 vmovdqu(Address(dst, len,
Address::times_2), tmp1); 9655 addptr(len, 16); 9656 jcc(Assembler::notZero, copy_16_loop); 9657 9658 bind(below_threshold); 9659 bind(copy_new_tail); 9660 movl(len, tmp2); 9661 andl(tmp2, 0x00000007); 9662 andl(len, 0xFFFFFFF8); 9663 jccb(Assembler::zero, copy_tail); 9664 9665 pmovzxbw(tmp1, Address(src, 0)); 9666 movdqu(Address(dst, 0), tmp1); 9667 addptr(src, 8); 9668 addptr(dst, 2 * 8); 9669 9670 jmp(copy_tail, true); 9671 } 9672 9673 // inflate 8 chars per iter 9674 bind(copy_8_loop); 9675 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words 9676 movdqu(Address(dst, len, Address::times_2), tmp1); 9677 addptr(len, 8); 9678 jcc(Assembler::notZero, copy_8_loop); 9679 9680 bind(copy_tail); 9681 movl(len, tmp2); 9682 9683 cmpl(len, 4); 9684 jccb(Assembler::less, copy_bytes); 9685 9686 movdl(tmp1, Address(src, 0)); // load 4 byte chars 9687 pmovzxbw(tmp1, tmp1); 9688 movq(Address(dst, 0), tmp1); 9689 subptr(len, 4); 9690 addptr(src, 4); 9691 addptr(dst, 8); 9692 9693 bind(copy_bytes); 9694 } else { 9695 bind(below_threshold); 9696 } 9697 9698 testl(len, len); 9699 jccb(Assembler::zero, done); 9700 lea(src, Address(src, len, Address::times_1)); 9701 lea(dst, Address(dst, len, Address::times_2)); 9702 negptr(len); 9703 9704 // inflate 1 char per iter 9705 bind(copy_chars_loop); 9706 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char 9707 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word 9708 increment(len); 9709 jcc(Assembler::notZero, copy_chars_loop); 9710 9711 bind(done); 9712 } 9713 9714 9715 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) { 9716 switch(type) { 9717 case T_BYTE: 9718 case T_BOOLEAN: 9719 evmovdqub(dst, kmask, src, merge, vector_len); 9720 break; 9721 case T_CHAR: 9722 case T_SHORT: 9723 evmovdquw(dst, kmask, src, merge, vector_len); 9724 break; 9725 case T_INT: 9726 case T_FLOAT: 9727 evmovdqul(dst, kmask, src, merge, vector_len); 9728 break; 9729 case T_LONG: 9730 case T_DOUBLE: 9731 evmovdquq(dst, kmask, src, merge, vector_len); 9732 break; 9733 default: 9734 fatal("Unexpected type argument %s", type2name(type)); 9735 break; 9736 } 9737 } 9738 9739 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) { 9740 switch(type) { 9741 case T_BYTE: 9742 case T_BOOLEAN: 9743 evmovdqub(dst, kmask, src, merge, vector_len); 9744 break; 9745 case T_CHAR: 9746 case T_SHORT: 9747 evmovdquw(dst, kmask, src, merge, vector_len); 9748 break; 9749 case T_INT: 9750 case T_FLOAT: 9751 evmovdqul(dst, kmask, src, merge, vector_len); 9752 break; 9753 case T_LONG: 9754 case T_DOUBLE: 9755 evmovdquq(dst, kmask, src, merge, vector_len); 9756 break; 9757 default: 9758 fatal("Unexpected type argument %s", type2name(type)); 9759 break; 9760 } 9761 } 9762 9763 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) { 9764 switch(masklen) { 9765 case 2: 9766 knotbl(dst, src); 9767 movl(rtmp, 3); 9768 kmovbl(ktmp, rtmp); 9769 kandbl(dst, ktmp, dst); 9770 break; 9771 case 4: 9772 knotbl(dst, src); 9773 movl(rtmp, 15); 9774 kmovbl(ktmp, rtmp); 9775 kandbl(dst, ktmp, dst); 9776 break; 9777 case 8: 9778 knotbl(dst, src); 9779 break; 9780 case 16: 9781 knotwl(dst, src); 9782 break; 9783 case 32: 9784 knotdl(dst, src); 9785 break; 9786 case 64: 9787 knotql(dst, src); 9788 break; 9789 default: 9790 fatal("Unexpected vector length %d", masklen); 9791 
break; 9792 } 9793 } 9794 9795 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9796 switch(type) { 9797 case T_BOOLEAN: 9798 case T_BYTE: 9799 kandbl(dst, src1, src2); 9800 break; 9801 case T_CHAR: 9802 case T_SHORT: 9803 kandwl(dst, src1, src2); 9804 break; 9805 case T_INT: 9806 case T_FLOAT: 9807 kanddl(dst, src1, src2); 9808 break; 9809 case T_LONG: 9810 case T_DOUBLE: 9811 kandql(dst, src1, src2); 9812 break; 9813 default: 9814 fatal("Unexpected type argument %s", type2name(type)); 9815 break; 9816 } 9817 } 9818 9819 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9820 switch(type) { 9821 case T_BOOLEAN: 9822 case T_BYTE: 9823 korbl(dst, src1, src2); 9824 break; 9825 case T_CHAR: 9826 case T_SHORT: 9827 korwl(dst, src1, src2); 9828 break; 9829 case T_INT: 9830 case T_FLOAT: 9831 kordl(dst, src1, src2); 9832 break; 9833 case T_LONG: 9834 case T_DOUBLE: 9835 korql(dst, src1, src2); 9836 break; 9837 default: 9838 fatal("Unexpected type argument %s", type2name(type)); 9839 break; 9840 } 9841 } 9842 9843 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9844 switch(type) { 9845 case T_BOOLEAN: 9846 case T_BYTE: 9847 kxorbl(dst, src1, src2); 9848 break; 9849 case T_CHAR: 9850 case T_SHORT: 9851 kxorwl(dst, src1, src2); 9852 break; 9853 case T_INT: 9854 case T_FLOAT: 9855 kxordl(dst, src1, src2); 9856 break; 9857 case T_LONG: 9858 case T_DOUBLE: 9859 kxorql(dst, src1, src2); 9860 break; 9861 default: 9862 fatal("Unexpected type argument %s", type2name(type)); 9863 break; 9864 } 9865 } 9866 9867 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9868 switch(type) { 9869 case T_BOOLEAN: 9870 case T_BYTE: 9871 evpermb(dst, mask, nds, src, merge, vector_len); break; 9872 case T_CHAR: 9873 case T_SHORT: 9874 evpermw(dst, mask, nds, src, merge, vector_len); break; 9875 case T_INT: 9876 case T_FLOAT: 9877 evpermd(dst, mask, nds, src, merge, vector_len); break; 9878 case T_LONG: 9879 case T_DOUBLE: 9880 evpermq(dst, mask, nds, src, merge, vector_len); break; 9881 default: 9882 fatal("Unexpected type argument %s", type2name(type)); break; 9883 } 9884 } 9885 9886 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9887 switch(type) { 9888 case T_BOOLEAN: 9889 case T_BYTE: 9890 evpermb(dst, mask, nds, src, merge, vector_len); break; 9891 case T_CHAR: 9892 case T_SHORT: 9893 evpermw(dst, mask, nds, src, merge, vector_len); break; 9894 case T_INT: 9895 case T_FLOAT: 9896 evpermd(dst, mask, nds, src, merge, vector_len); break; 9897 case T_LONG: 9898 case T_DOUBLE: 9899 evpermq(dst, mask, nds, src, merge, vector_len); break; 9900 default: 9901 fatal("Unexpected type argument %s", type2name(type)); break; 9902 } 9903 } 9904 9905 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9906 switch(type) { 9907 case T_BYTE: 9908 evpminsb(dst, mask, nds, src, merge, vector_len); break; 9909 case T_SHORT: 9910 evpminsw(dst, mask, nds, src, merge, vector_len); break; 9911 case T_INT: 9912 evpminsd(dst, mask, nds, src, merge, vector_len); break; 9913 case T_LONG: 9914 evpminsq(dst, mask, nds, src, merge, vector_len); break; 9915 default: 9916 fatal("Unexpected type argument %s", type2name(type)); break; 9917 } 9918 } 9919 9920 void 
MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9921 switch(type) { 9922 case T_BYTE: 9923 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 9924 case T_SHORT: 9925 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 9926 case T_INT: 9927 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 9928 case T_LONG: 9929 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 9930 default: 9931 fatal("Unexpected type argument %s", type2name(type)); break; 9932 } 9933 } 9934 9935 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9936 switch(type) { 9937 case T_BYTE: 9938 evpminsb(dst, mask, nds, src, merge, vector_len); break; 9939 case T_SHORT: 9940 evpminsw(dst, mask, nds, src, merge, vector_len); break; 9941 case T_INT: 9942 evpminsd(dst, mask, nds, src, merge, vector_len); break; 9943 case T_LONG: 9944 evpminsq(dst, mask, nds, src, merge, vector_len); break; 9945 default: 9946 fatal("Unexpected type argument %s", type2name(type)); break; 9947 } 9948 } 9949 9950 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9951 switch(type) { 9952 case T_BYTE: 9953 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 9954 case T_SHORT: 9955 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 9956 case T_INT: 9957 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 9958 case T_LONG: 9959 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 9960 default: 9961 fatal("Unexpected type argument %s", type2name(type)); break; 9962 } 9963 } 9964 9965 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9966 switch(type) { 9967 case T_INT: 9968 evpxord(dst, mask, nds, src, merge, vector_len); break; 9969 case T_LONG: 9970 evpxorq(dst, mask, nds, src, merge, vector_len); break; 9971 default: 9972 fatal("Unexpected type argument %s", type2name(type)); break; 9973 } 9974 } 9975 9976 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9977 switch(type) { 9978 case T_INT: 9979 evpxord(dst, mask, nds, src, merge, vector_len); break; 9980 case T_LONG: 9981 evpxorq(dst, mask, nds, src, merge, vector_len); break; 9982 default: 9983 fatal("Unexpected type argument %s", type2name(type)); break; 9984 } 9985 } 9986 9987 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9988 switch(type) { 9989 case T_INT: 9990 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 9991 case T_LONG: 9992 evporq(dst, mask, nds, src, merge, vector_len); break; 9993 default: 9994 fatal("Unexpected type argument %s", type2name(type)); break; 9995 } 9996 } 9997 9998 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9999 switch(type) { 10000 case T_INT: 10001 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 10002 case T_LONG: 10003 evporq(dst, mask, nds, src, merge, vector_len); break; 10004 default: 10005 fatal("Unexpected type argument %s", type2name(type)); break; 10006 } 10007 } 10008 10009 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, 
XMMRegister src, bool merge, int vector_len) { 10010 switch(type) { 10011 case T_INT: 10012 evpandd(dst, mask, nds, src, merge, vector_len); break; 10013 case T_LONG: 10014 evpandq(dst, mask, nds, src, merge, vector_len); break; 10015 default: 10016 fatal("Unexpected type argument %s", type2name(type)); break; 10017 } 10018 } 10019 10020 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10021 switch(type) { 10022 case T_INT: 10023 evpandd(dst, mask, nds, src, merge, vector_len); break; 10024 case T_LONG: 10025 evpandq(dst, mask, nds, src, merge, vector_len); break; 10026 default: 10027 fatal("Unexpected type argument %s", type2name(type)); break; 10028 } 10029 } 10030 10031 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) { 10032 switch(masklen) { 10033 case 8: 10034 kortestbl(src1, src2); 10035 break; 10036 case 16: 10037 kortestwl(src1, src2); 10038 break; 10039 case 32: 10040 kortestdl(src1, src2); 10041 break; 10042 case 64: 10043 kortestql(src1, src2); 10044 break; 10045 default: 10046 fatal("Unexpected mask length %d", masklen); 10047 break; 10048 } 10049 } 10050 10051 10052 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) { 10053 switch(masklen) { 10054 case 8: 10055 ktestbl(src1, src2); 10056 break; 10057 case 16: 10058 ktestwl(src1, src2); 10059 break; 10060 case 32: 10061 ktestdl(src1, src2); 10062 break; 10063 case 64: 10064 ktestql(src1, src2); 10065 break; 10066 default: 10067 fatal("Unexpected mask length %d", masklen); 10068 break; 10069 } 10070 } 10071 10072 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 10073 switch(type) { 10074 case T_INT: 10075 evprold(dst, mask, src, shift, merge, vlen_enc); break; 10076 case T_LONG: 10077 evprolq(dst, mask, src, shift, merge, vlen_enc); break; 10078 default: 10079 fatal("Unexpected type argument %s", type2name(type)); break; 10080 break; 10081 } 10082 } 10083 10084 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 10085 switch(type) { 10086 case T_INT: 10087 evprord(dst, mask, src, shift, merge, vlen_enc); break; 10088 case T_LONG: 10089 evprorq(dst, mask, src, shift, merge, vlen_enc); break; 10090 default: 10091 fatal("Unexpected type argument %s", type2name(type)); break; 10092 } 10093 } 10094 10095 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 10096 switch(type) { 10097 case T_INT: 10098 evprolvd(dst, mask, src1, src2, merge, vlen_enc); break; 10099 case T_LONG: 10100 evprolvq(dst, mask, src1, src2, merge, vlen_enc); break; 10101 default: 10102 fatal("Unexpected type argument %s", type2name(type)); break; 10103 } 10104 } 10105 10106 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 10107 switch(type) { 10108 case T_INT: 10109 evprorvd(dst, mask, src1, src2, merge, vlen_enc); break; 10110 case T_LONG: 10111 evprorvq(dst, mask, src1, src2, merge, vlen_enc); break; 10112 default: 10113 fatal("Unexpected type argument %s", type2name(type)); break; 10114 } 10115 } 10116 10117 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 10118 assert(rscratch != noreg || always_reachable(src), 
"missing"); 10119 10120 if (reachable(src)) { 10121 evpandq(dst, nds, as_Address(src), vector_len); 10122 } else { 10123 lea(rscratch, src); 10124 evpandq(dst, nds, Address(rscratch, 0), vector_len); 10125 } 10126 } 10127 10128 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 10129 assert(rscratch != noreg || always_reachable(src), "missing"); 10130 10131 if (reachable(src)) { 10132 Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len); 10133 } else { 10134 lea(rscratch, src); 10135 Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 10136 } 10137 } 10138 10139 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 10140 assert(rscratch != noreg || always_reachable(src), "missing"); 10141 10142 if (reachable(src)) { 10143 evporq(dst, nds, as_Address(src), vector_len); 10144 } else { 10145 lea(rscratch, src); 10146 evporq(dst, nds, Address(rscratch, 0), vector_len); 10147 } 10148 } 10149 10150 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 10151 assert(rscratch != noreg || always_reachable(src), "missing"); 10152 10153 if (reachable(src)) { 10154 vpshufb(dst, nds, as_Address(src), vector_len); 10155 } else { 10156 lea(rscratch, src); 10157 vpshufb(dst, nds, Address(rscratch, 0), vector_len); 10158 } 10159 } 10160 10161 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) { 10162 assert(rscratch != noreg || always_reachable(src3), "missing"); 10163 10164 if (reachable(src3)) { 10165 vpternlogq(dst, imm8, src2, as_Address(src3), vector_len); 10166 } else { 10167 lea(rscratch, src3); 10168 vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len); 10169 } 10170 } 10171 10172 #if COMPILER2_OR_JVMCI 10173 10174 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 10175 Register length, Register temp, int vec_enc) { 10176 // Computing mask for predicated vector store. 10177 movptr(temp, -1); 10178 bzhiq(temp, temp, length); 10179 kmov(mask, temp); 10180 evmovdqu(bt, mask, dst, xmm, true, vec_enc); 10181 } 10182 10183 // Set memory operation for length "less than" 64 bytes. 
10184 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp, 10185 XMMRegister xmm, KRegister mask, Register length, 10186 Register temp, bool use64byteVector) { 10187 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10188 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 10189 if (!use64byteVector) { 10190 fill32(dst, disp, xmm); 10191 subptr(length, 32 >> shift); 10192 fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp); 10193 } else { 10194 assert(MaxVectorSize == 64, "vector length != 64"); 10195 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit); 10196 } 10197 } 10198 10199 10200 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp, 10201 XMMRegister xmm, KRegister mask, Register length, 10202 Register temp) { 10203 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10204 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 10205 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit); 10206 } 10207 10208 10209 void MacroAssembler::fill32(Address dst, XMMRegister xmm) { 10210 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10211 vmovdqu(dst, xmm); 10212 } 10213 10214 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) { 10215 fill32(Address(dst, disp), xmm); 10216 } 10217 10218 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) { 10219 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10220 if (!use64byteVector) { 10221 fill32(dst, xmm); 10222 fill32(dst.plus_disp(32), xmm); 10223 } else { 10224 evmovdquq(dst, xmm, Assembler::AVX_512bit); 10225 } 10226 } 10227 10228 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) { 10229 fill64(Address(dst, disp), xmm, use64byteVector); 10230 } 10231 10232 #ifdef _LP64 10233 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value, 10234 Register count, Register rtmp, XMMRegister xtmp) { 10235 Label L_exit; 10236 Label L_fill_start; 10237 Label L_fill_64_bytes; 10238 Label L_fill_96_bytes; 10239 Label L_fill_128_bytes; 10240 Label L_fill_128_bytes_loop; 10241 Label L_fill_128_loop_header; 10242 Label L_fill_128_bytes_loop_header; 10243 Label L_fill_128_bytes_loop_pre_header; 10244 Label L_fill_zmm_sequence; 10245 10246 int shift = -1; 10247 int avx3threshold = VM_Version::avx3_threshold(); 10248 switch(type) { 10249 case T_BYTE: shift = 0; 10250 break; 10251 case T_SHORT: shift = 1; 10252 break; 10253 case T_INT: shift = 2; 10254 break; 10255 /* Uncomment when LONG fill stubs are supported. 
10256 case T_LONG: shift = 3; 10257 break; 10258 */ 10259 default: 10260 fatal("Unhandled type: %s\n", type2name(type)); 10261 } 10262 10263 if ((avx3threshold != 0) || (MaxVectorSize == 32)) { 10264 10265 if (MaxVectorSize == 64) { 10266 cmpq(count, avx3threshold >> shift); 10267 jcc(Assembler::greater, L_fill_zmm_sequence); 10268 } 10269 10270 evpbroadcast(type, xtmp, value, Assembler::AVX_256bit); 10271 10272 bind(L_fill_start); 10273 10274 cmpq(count, 32 >> shift); 10275 jccb(Assembler::greater, L_fill_64_bytes); 10276 fill32_masked(shift, to, 0, xtmp, k2, count, rtmp); 10277 jmp(L_exit); 10278 10279 bind(L_fill_64_bytes); 10280 cmpq(count, 64 >> shift); 10281 jccb(Assembler::greater, L_fill_96_bytes); 10282 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp); 10283 jmp(L_exit); 10284 10285 bind(L_fill_96_bytes); 10286 cmpq(count, 96 >> shift); 10287 jccb(Assembler::greater, L_fill_128_bytes); 10288 fill64(to, 0, xtmp); 10289 subq(count, 64 >> shift); 10290 fill32_masked(shift, to, 64, xtmp, k2, count, rtmp); 10291 jmp(L_exit); 10292 10293 bind(L_fill_128_bytes); 10294 cmpq(count, 128 >> shift); 10295 jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header); 10296 fill64(to, 0, xtmp); 10297 fill32(to, 64, xtmp); 10298 subq(count, 96 >> shift); 10299 fill32_masked(shift, to, 96, xtmp, k2, count, rtmp); 10300 jmp(L_exit); 10301 10302 bind(L_fill_128_bytes_loop_pre_header); 10303 { 10304 mov(rtmp, to); 10305 andq(rtmp, 31); 10306 jccb(Assembler::zero, L_fill_128_bytes_loop_header); 10307 negq(rtmp); 10308 addq(rtmp, 32); 10309 mov64(r8, -1L); 10310 bzhiq(r8, r8, rtmp); 10311 kmovql(k2, r8); 10312 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit); 10313 addq(to, rtmp); 10314 shrq(rtmp, shift); 10315 subq(count, rtmp); 10316 } 10317 10318 cmpq(count, 128 >> shift); 10319 jcc(Assembler::less, L_fill_start); 10320 10321 bind(L_fill_128_bytes_loop_header); 10322 subq(count, 128 >> shift); 10323 10324 align32(); 10325 bind(L_fill_128_bytes_loop); 10326 fill64(to, 0, xtmp); 10327 fill64(to, 64, xtmp); 10328 addq(to, 128); 10329 subq(count, 128 >> shift); 10330 jccb(Assembler::greaterEqual, L_fill_128_bytes_loop); 10331 10332 addq(count, 128 >> shift); 10333 jcc(Assembler::zero, L_exit); 10334 jmp(L_fill_start); 10335 } 10336 10337 if (MaxVectorSize == 64) { 10338 // Sequence using 64 byte ZMM register. 
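// Illustrative outline (not VM code) of the ZMM dispatch that follows, in
// terms of the byte count n:
//
//   if      (n <= 64)  -> one masked 64B store
//   else if (n <= 128) -> full 64B store + masked 64B tail
//   else if (n <= 192) -> two full 64B stores + masked 64B tail
//   else               -> align dst to 64B, 192B-per-iteration loop, residual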
10339 Label L_fill_128_bytes_zmm; 10340 Label L_fill_192_bytes_zmm; 10341 Label L_fill_192_bytes_loop_zmm; 10342 Label L_fill_192_bytes_loop_header_zmm; 10343 Label L_fill_192_bytes_loop_pre_header_zmm; 10344 Label L_fill_start_zmm_sequence; 10345 10346 bind(L_fill_zmm_sequence); 10347 evpbroadcast(type, xtmp, value, Assembler::AVX_512bit); 10348 10349 bind(L_fill_start_zmm_sequence); 10350 cmpq(count, 64 >> shift); 10351 jccb(Assembler::greater, L_fill_128_bytes_zmm); 10352 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true); 10353 jmp(L_exit); 10354 10355 bind(L_fill_128_bytes_zmm); 10356 cmpq(count, 128 >> shift); 10357 jccb(Assembler::greater, L_fill_192_bytes_zmm); 10358 fill64(to, 0, xtmp, true); 10359 subq(count, 64 >> shift); 10360 fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true); 10361 jmp(L_exit); 10362 10363 bind(L_fill_192_bytes_zmm); 10364 cmpq(count, 192 >> shift); 10365 jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm); 10366 fill64(to, 0, xtmp, true); 10367 fill64(to, 64, xtmp, true); 10368 subq(count, 128 >> shift); 10369 fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true); 10370 jmp(L_exit); 10371 10372 bind(L_fill_192_bytes_loop_pre_header_zmm); 10373 { 10374 movq(rtmp, to); 10375 andq(rtmp, 63); 10376 jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm); 10377 negq(rtmp); 10378 addq(rtmp, 64); 10379 mov64(r8, -1L); 10380 bzhiq(r8, r8, rtmp); 10381 kmovql(k2, r8); 10382 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit); 10383 addq(to, rtmp); 10384 shrq(rtmp, shift); 10385 subq(count, rtmp); 10386 } 10387 10388 cmpq(count, 192 >> shift); 10389 jcc(Assembler::less, L_fill_start_zmm_sequence); 10390 10391 bind(L_fill_192_bytes_loop_header_zmm); 10392 subq(count, 192 >> shift); 10393 10394 align32(); 10395 bind(L_fill_192_bytes_loop_zmm); 10396 fill64(to, 0, xtmp, true); 10397 fill64(to, 64, xtmp, true); 10398 fill64(to, 128, xtmp, true); 10399 addq(to, 192); 10400 subq(count, 192 >> shift); 10401 jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm); 10402 10403 addq(count, 192 >> shift); 10404 jcc(Assembler::zero, L_exit); 10405 jmp(L_fill_start_zmm_sequence); 10406 } 10407 bind(L_exit); 10408 } 10409 #endif 10410 #endif //COMPILER2_OR_JVMCI 10411 10412 10413 #ifdef _LP64 10414 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) { 10415 Label done; 10416 cvttss2sil(dst, src); 10417 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 10418 cmpl(dst, 0x80000000); // float_sign_flip 10419 jccb(Assembler::notEqual, done); 10420 subptr(rsp, 8); 10421 movflt(Address(rsp, 0), src); 10422 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup()))); 10423 pop(dst); 10424 bind(done); 10425 } 10426 10427 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) { 10428 Label done; 10429 cvttsd2sil(dst, src); 10430 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 10431 cmpl(dst, 0x80000000); // float_sign_flip 10432 jccb(Assembler::notEqual, done); 10433 subptr(rsp, 8); 10434 movdbl(Address(rsp, 0), src); 10435 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup()))); 10436 pop(dst); 10437 bind(done); 10438 } 10439 10440 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) { 10441 Label done; 10442 cvttss2siq(dst, src); 10443 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); 10444 jccb(Assembler::notEqual, done); 10445 subptr(rsp, 8); 10446 

void MacroAssembler::convert_f2l(Register dst, XMMRegister src) {
  Label done;
  cvttss2siq(dst, src);
  cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movflt(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup())));
  pop(dst);
  bind(done);
}

void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the rounding algorithm.
  // Please refer to java.lang.Math.round(float) algorithm for details.
  const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000;
  const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24;
  const int32_t FloatConsts_EXP_BIAS = 127;
  const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF;
  const int32_t MINUS_32 = 0xFFFFFFE0;
  Label L_special_case, L_block1, L_exit;
  movl(rtmp, FloatConsts_EXP_BIT_MASK);
  movdl(dst, src);
  andl(dst, rtmp);
  sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1);
  movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS);
  subl(rtmp, dst);
  movl(rcx, rtmp); // shift count for the implicit-count sarl below must be in cl
  movl(dst, MINUS_32);
  testl(rtmp, dst);
  jccb(Assembler::notEqual, L_special_case);
  movdl(dst, src);
  andl(dst, FloatConsts_SIGNIF_BIT_MASK);
  orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1);
  movdl(rtmp, src);
  testl(rtmp, rtmp);
  jccb(Assembler::greaterEqual, L_block1);
  negl(dst);
  bind(L_block1);
  sarl(dst);
  addl(dst, 0x1);
  sarl(dst, 0x1);
  jmp(L_exit);
  bind(L_special_case);
  convert_f2i(dst, src);
  bind(L_exit);
}
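
// round_float above mirrors the bit-twiddling form of java.lang.Math.round(float)
// referenced in its comment. A compact C rendering of the same algorithm
// (illustrative only; assumes arithmetic right shift on negative ints):
//
//   #include <stdint.h>
//   #include <string.h>
//   static int32_t round_float_sketch(float f) {
//     int32_t bits;  memcpy(&bits, &f, sizeof(bits));
//     int32_t biasedExp = (bits & 0x7F800000) >> 23;
//     int32_t shift = (24 - 2 + 127) - biasedExp;       // 149 - biasedExp
//     if ((shift & -32) == 0) {                         // shift in [0, 31]
//       int32_t r = (bits & 0x007FFFFF) | 0x00800000;   // restore implicit 1 bit
//       if (bits < 0) r = -r;
//       return ((r >> shift) + 1) >> 1;                 // round half up
//     }
//     return (int32_t)f;                                // huge/tiny: plain f2i
//   }
//
// The shift-out-of-range case is exactly the L_special_case path, which defers
// to convert_f2i(); round_double below is the 64-bit analogue.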

void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the rounding algorithm.
  // Please refer to java.lang.Math.round(double) algorithm for details.
  const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L;
  const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53;
  const int64_t DoubleConsts_EXP_BIAS = 1023;
  const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL;
  const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L;
  Label L_special_case, L_block1, L_exit;
  mov64(rtmp, DoubleConsts_EXP_BIT_MASK);
  movq(dst, src);
  andq(dst, rtmp);
  sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1);
  mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS);
  subq(rtmp, dst);
  movq(rcx, rtmp); // shift count for the implicit-count sarq below must be in cl
  mov64(dst, MINUS_64);
  testq(rtmp, dst);
  jccb(Assembler::notEqual, L_special_case);
  movq(dst, src);
  mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK);
  andq(dst, rtmp);
  mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1);
  orq(dst, rtmp);
  movq(rtmp, src);
  testq(rtmp, rtmp);
  jccb(Assembler::greaterEqual, L_block1);
  negq(dst);
  bind(L_block1);
  sarq(dst);
  addq(dst, 0x1);
  sarq(dst, 0x1);
  jmp(L_exit);
  bind(L_special_case);
  convert_d2l(dst, src);
  bind(L_exit);
}

void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
  Label done;
  cvttsd2siq(dst, src);
  cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movdbl(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
  pop(dst);
  bind(done);
}

void MacroAssembler::cache_wb(Address line)
{
  // 64 bit cpus always support clflush
  assert(VM_Version::supports_clflush(), "clflush should be available");
  bool optimized = VM_Version::supports_clflushopt();
  bool no_evict = VM_Version::supports_clwb();

  // prefer clwb (writeback without evict), otherwise
  // prefer clflushopt (potentially parallel writeback with evict),
  // otherwise fall back on clflush (serial writeback with evict)

  if (optimized) {
    if (no_evict) {
      clwb(line);
    } else {
      clflushopt(line);
    }
  } else {
    // no need for fence when using CLFLUSH
    clflush(line);
  }
}

void MacroAssembler::cache_wbsync(bool is_pre)
{
  assert(VM_Version::supports_clflush(), "clflush should be available");
  bool optimized = VM_Version::supports_clflushopt();
  bool no_evict = VM_Version::supports_clwb();

  // pick the correct implementation

  if (!is_pre && (optimized || no_evict)) {
    // need an sfence for post flush when using clflushopt or clwb
    // otherwise no need for any synchronization

    sfence();
  }
}

#endif // _LP64
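
// For reference, the same writeback-instruction preference as cache_wb() above,
// expressed with the documented compiler intrinsics (a sketch assuming a
// GCC/Clang x86-64 toolchain built with -mclwb -mclflushopt):
//
//   #include <immintrin.h>
//   static void cache_wb_sketch(void* line, bool has_clflushopt, bool has_clwb) {
//     if (has_clflushopt) {
//       if (has_clwb) _mm_clwb(line);        // writeback, line may stay cached
//       else          _mm_clflushopt(line);  // weakly-ordered writeback + evict
//     } else {
//       _mm_clflush(line);                   // ordered writeback + evict
//     }
//   }
//
// As cache_wbsync() encodes, clwb and clflushopt are only globally ordered by
// a trailing sfence, whereas clflush needs no extra fencing here.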

Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::zero:         return Assembler::notZero;
    case Assembler::notZero:      return Assembler::zero;
    case Assembler::less:         return Assembler::greaterEqual;
    case Assembler::lessEqual:    return Assembler::greater;
    case Assembler::greater:      return Assembler::lessEqual;
    case Assembler::greaterEqual: return Assembler::less;
    case Assembler::below:        return Assembler::aboveEqual;
    case Assembler::belowEqual:   return Assembler::above;
    case Assembler::above:        return Assembler::belowEqual;
    case Assembler::aboveEqual:   return Assembler::below;
    case Assembler::overflow:     return Assembler::noOverflow;
    case Assembler::noOverflow:   return Assembler::overflow;
    case Assembler::negative:     return Assembler::positive;
    case Assembler::positive:     return Assembler::negative;
    case Assembler::parity:       return Assembler::noParity;
    case Assembler::noParity:     return Assembler::parity;
  }
  ShouldNotReachHere(); return Assembler::overflow;
}

SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, const bool* flag_addr, bool value, Register rscratch) {
  _masm = masm;
  _masm->cmp8(ExternalAddress((address)flag_addr), value, rscratch);
  _masm->jcc(Assembler::equal, _label);
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}

// 32-bit Windows has its own fast-path implementation
// of get_thread
#if !defined(WIN32) || defined(_LP64)

// This is simply a call to Thread::current()
void MacroAssembler::get_thread(Register thread) {
  if (thread != rax) {
    push(rax);
  }
  LP64_ONLY(push(rdi);)
  LP64_ONLY(push(rsi);)
  push(rdx);
  push(rcx);
#ifdef _LP64
  push(r8);
  push(r9);
  push(r10);
  push(r11);
#endif

  MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);

#ifdef _LP64
  pop(r11);
  pop(r10);
  pop(r9);
  pop(r8);
#endif
  pop(rcx);
  pop(rdx);
  LP64_ONLY(pop(rsi);)
  LP64_ONLY(pop(rdi);)
  if (thread != rax) {
    mov(thread, rax);
    pop(rax);
  }
}


#endif // !WIN32 || _LP64

void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
  Label L_stack_ok;
  if (bias == 0) {
    testptr(sp, 2 * wordSize - 1);
  } else {
    // lea(tmp, Address(rsp, bias));
    mov(tmp, sp);
    addptr(tmp, bias);
    testptr(tmp, 2 * wordSize - 1);
  }
  jcc(Assembler::equal, L_stack_ok);
  block_comment(msg);
  stop(msg);
  bind(L_stack_ok);
}
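
// Before the two lightweight-locking routines below, a pseudo-C sketch of the
// protocol they implement (illustrative only; the types, field names, and the
// flat lock-stack index are ours, not the VM's declarations):
//
//   #include <stdatomic.h>
//   #include <stdbool.h>
//   #include <stdint.h>
//   typedef struct { _Atomic uintptr_t mark; } oop_s;
//   static bool fast_lock_sketch(oop_s* obj, uintptr_t* stack, int* top, int cap) {
//     if (*top >= cap) return false;                    // no room -> slow path
//     uintptr_t mark     = atomic_load(&obj->mark);
//     uintptr_t unlocked = (mark & ~(uintptr_t)3) | 1;  // expected: lock bits 01
//     uintptr_t locked   =  mark & ~(uintptr_t)3;       // desired:  lock bits 00
//     if (!atomic_compare_exchange_strong(&obj->mark, &unlocked, locked))
//       return false;                                   // contended or inflated
//     stack[(*top)++] = (uintptr_t)obj;                 // push onto lock-stack
//     return true;
//   }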

// Implements lightweight-locking.
// Branches to slow upon failure to lock the object, with ZF cleared.
// Falls through upon success with unspecified ZF.
//
// obj: the object to be locked
// hdr: the (pre-loaded) header of the object, must be rax
// thread: the thread which attempts to lock obj
// tmp: a temporary register
void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register thread, Register tmp, Label& slow) {
  assert(hdr == rax, "header must be in rax for cmpxchg");
  assert_different_registers(obj, hdr, thread, tmp);

  // First we need to check if the lock-stack has room for pushing the object reference.
  // Note: we subtract 1 from the end-offset so that we can do a 'greater' comparison, instead
  // of 'greaterEqual' below, which readily clears the ZF. This makes C2 code a little simpler and
  // avoids one branch.
  cmpl(Address(thread, JavaThread::lock_stack_top_offset()), LockStack::end_offset() - 1);
  jcc(Assembler::greater, slow);

  // Now we attempt to take the fast-lock.
  // Clear lock_mask bits (locked state).
  andptr(hdr, ~(int32_t)markWord::lock_mask_in_place);
  movptr(tmp, hdr);
  // Set unlocked_value bit.
  orptr(hdr, markWord::unlocked_value);
  if (EnableValhalla) {
    // Mask inline_type bit such that we go to the slow path if object is an inline type
    andptr(hdr, ~((int) markWord::inline_type_bit_in_place));
  }
  lock();
  cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  jcc(Assembler::notEqual, slow);

  // If successful, push object to lock-stack.
  movl(tmp, Address(thread, JavaThread::lock_stack_top_offset()));
  movptr(Address(thread, tmp), obj);
  incrementl(tmp, oopSize);
  movl(Address(thread, JavaThread::lock_stack_top_offset()), tmp);
}

// Implements lightweight-unlocking.
// Branches to slow upon failure, with ZF cleared.
// Falls through upon success, with unspecified ZF.
//
// obj: the object to be unlocked
// hdr: the (pre-loaded) header of the object, must be rax
// tmp: a temporary register
void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow) {
  assert(hdr == rax, "header must be in rax for cmpxchg");
  assert_different_registers(obj, hdr, tmp);

  // Mark-word must be lock_mask now, try to swing it back to unlocked_value.
  movptr(tmp, hdr); // hdr (rax) is cmpxchg's expected old value; build the new, unlocked value in tmp
  orptr(tmp, markWord::unlocked_value);
  lock();
  cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  jcc(Assembler::notEqual, slow);
  // Pop the lock object from the lock-stack.
#ifdef _LP64
  const Register thread = r15_thread;
#else
  const Register thread = rax;
  get_thread(thread);
#endif
  subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
#ifdef ASSERT
  movl(tmp, Address(thread, JavaThread::lock_stack_top_offset()));
  movptr(Address(thread, tmp), 0);
#endif
}
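
// And the matching unlock fast path, continuing the sketch above (again
// illustrative only; reuses oop_s and the flat lock-stack index from that sketch):
//
//   static bool fast_unlock_sketch(oop_s* obj, uintptr_t* stack, int* top) {
//     uintptr_t locked   = atomic_load(&obj->mark);  // expected: lock bits 00
//     uintptr_t unlocked = locked | 1;               // desired:  lock bits 01
//     if (!atomic_compare_exchange_strong(&obj->mark, &locked, unlocked))
//       return false;                                // header changed -> slow path
//     stack[--(*top)] = 0;                           // pop (zapped under ASSERT)
//     return true;
//   }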