/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "crc32c.h"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

static const Assembler::Condition reverse[] = {
    Assembler::noOverflow   /* overflow      = 0x0 */ ,
    Assembler::overflow     /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual   /* carrySet      = 0x2, below         = 0x2 */ ,
    Assembler::below        /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
    Assembler::notZero      /* zero          = 0x4, equal         = 0x4 */ ,
    Assembler::zero         /* notZero       = 0x5, notEqual      = 0x5 */ ,
    Assembler::above        /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual   /* above         = 0x7 */ ,
    Assembler::positive     /* negative      = 0x8 */ ,
    Assembler::negative     /* positive      = 0x9 */ ,
    Assembler::noParity     /* parity        = 0xa */ ,
    Assembler::parity       /* noParity      = 0xb */ ,
    Assembler::greaterEqual /* less          = 0xc */ ,
    Assembler::less         /* greaterEqual  = 0xd */ ,
    Assembler::greater      /* lessEqual     = 0xe */ ,
    Assembler::lessEqual    /* greater       = 0xf, */

};
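// A minimal, hypothetical sketch of how a reversed condition is typically
// used -- branching over the negated condition to emulate a conditional
// move on hardware without cmov (the names masm, cc, dst, src are
// illustrative, not from this file):
//
//   Label skip;
//   masm->jcc(reverse[cc], skip);   // fall through only when cc holds
//   masm->movl(dst, src);           // the "conditionally" executed move
//   masm->bind(skip);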

// Implementation of MacroAssembler

// First all the versions that have distinct versions depending on 32/64 bit
// Unless the difference is trivial (1 line or so).

#ifndef _LP64

// 32bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}

Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  assert(rscratch == noreg, "");
  return Address::make_array(adr);
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments) {
  call(RuntimeAddress(entry_point));
  increment(rsp, number_of_arguments * wordSize);
}

void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Address src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Register src1, jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::extend_sign(Register hi, Register lo) {
  // According to Intel Doc. AP-526, "Integer Divide", p.18.
  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
    cdql();
  } else {
    movl(hi, lo);
    sarl(hi, 31);
  }
}

void MacroAssembler::jC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::parity, L);
}

void MacroAssembler::jnC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::noParity, L);
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  assert(rscratch == noreg, "not needed");
  jmp(as_Address(entry, noreg));
}

// Note: y_lo will be destroyed
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  // Long compare for Java (semantics as described in JVM spec.)
  Label high, low, done;

  cmpl(x_hi, y_hi);
  jcc(Assembler::less, low);
  jcc(Assembler::greater, high);
  // x_hi is the return register
  xorl(x_hi, x_hi);
  cmpl(x_lo, y_lo);
  jcc(Assembler::below, low);
  jcc(Assembler::equal, done);

  bind(high);
  xorl(x_hi, x_hi);
  increment(x_hi);
  jmp(done);

  bind(low);
  xorl(x_hi, x_hi);
  decrementl(x_hi);

  bind(done);
}
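// For reference, lcmp2int computes the JVM `lcmp` result in x_hi; a C
// sketch of the intended semantics:
//
//   int32_t lcmp(int64_t x, int64_t y) {
//     return x < y ? -1 : (x == y ? 0 : 1);
//   }
//
// The high words are compared signed; only when they are equal are the low
// words compared unsigned (Assembler::below), since the low half of a long
// carries no sign information.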
void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  assert(rscratch == noreg, "not needed");

  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t)adr.target(), adr.rspec());
}

void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  //
  // rsp ---> [  ??  ] \               \
  //            ....    | y_rsp_offset  |
  //          [ y_lo ] /  (in bytes)    | x_rsp_offset
  //          [ y_hi ]                  | (in bytes)
  //            ....                    |
  //          [ x_lo ]                 /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);
  orl(rbx, rcx);                                 // rbx, = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);                   // if rbx, = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);                                    // x_hi * y_lo
  movl(rbx, rax);                                // save lo(x_hi * y_lo) in rbx,
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);                                     // x_lo * y_hi
  addl(rbx, rax);                                // add lo(x_lo * y_hi) to rbx,
  // 3rd step
  bind(quick);                                   // note: rbx, = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);                                    // x_lo * y_lo
  addl(rdx, rbx);                                // correct hi(x_lo * y_lo)
}

void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);
  negl(hi);
}

void MacroAssembler::lshl(Register hi, Register lo) {
  // Java shift left long support (semantics as described in JVM spec., p.305)
  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  // shift value is in rcx !
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(hi, lo);                                  // x := x << n
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shldl(hi, lo);                                 // x := x << s
  shll(lo);
}
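// A worked example of the decomposition above, with n = 32: for s = 40,
// x << 40 == (x << 32) << 8. The (x << 32) step is simply "hi = lo; lo = 0",
// and the remaining count needs no explicit subtraction because the shift
// instructions use rcx mod 32 (40 mod 32 == 8).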

void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(lo, hi);                                  // x := x >> n
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shrdl(lo, hi);                                 // x := x >> s
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  movl(as_Address(dst, noreg), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src, noreg));
}

void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  movl(dst, src);
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  assert(rscratch == noreg, "redundant");
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax,
                             int eip, char* msg) {
  // In order to get locks to work, we need to fake an in_VM state
  JavaThread* thread = JavaThread::current();
  JavaThreadState saved_state = thread->thread_state();
  thread->set_thread_state(_thread_in_vm);
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
  if ((WizardMode || Verbose) && PrintMiscellaneous) {
    tty->cr();
    findpc(eip);
    tty->cr();
  }
#endif
#define PRINT_REG(rax) \
  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
  PRINT_REG(rax);
  PRINT_REG(rbx);
  PRINT_REG(rcx);
  PRINT_REG(rdx);
  PRINT_REG(rdi);
  PRINT_REG(rsi);
  PRINT_REG(rbp);
  PRINT_REG(rsp);
#undef PRINT_REG
  // Print some words near top of stack.
  int* dump_sp = (int*) rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 16; row++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 8; col++) {
      tty->print(" 0x%08x", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)eip-64, (address)eip);
  tty->print_cr("--------");
  Disassembler::decode((address)eip, (address)eip+32);
}

void MacroAssembler::stop(const char* msg) {
  // push address of message
  ExternalAddress message((address)msg);
  pushptr(message.addr(), noreg);
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  // push address of message
  ExternalAddress message((address)msg);
  pushptr(message.addr(), noreg);

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize);       // discard argument
  pop_CPU_state();
}

void MacroAssembler::print_state() {
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers

  push_CPU_state();
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
  pop_CPU_state();

  popa();
  addl(rsp, wordSize);
}

#else // _LP64

// 64 bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
rval"); 491 assert(reachable(adr), "must be"); 492 return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc()); 493 494 } 495 496 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) { 497 AddressLiteral base = adr.base(); 498 lea(rscratch, base); 499 Address index = adr.index(); 500 assert(index._disp == 0, "must not have disp"); // maybe it can? 501 Address array(rscratch, index._index, index._scale, index._disp); 502 return array; 503 } 504 505 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) { 506 Label L, E; 507 508 #ifdef _WIN64 509 // Windows always allocates space for it's register args 510 assert(num_args <= 4, "only register arguments supported"); 511 subq(rsp, frame::arg_reg_save_area_bytes); 512 #endif 513 514 // Align stack if necessary 515 testl(rsp, 15); 516 jcc(Assembler::zero, L); 517 518 subq(rsp, 8); 519 call(RuntimeAddress(entry_point)); 520 addq(rsp, 8); 521 jmp(E); 522 523 bind(L); 524 call(RuntimeAddress(entry_point)); 525 526 bind(E); 527 528 #ifdef _WIN64 529 // restore stack pointer 530 addq(rsp, frame::arg_reg_save_area_bytes); 531 #endif 532 533 if (entry_point == CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter)) { 534 Label not_preempted; 535 movptr(rscratch1, Address(r15_thread, JavaThread::preempt_alternate_return_offset())); 536 cmpptr(rscratch1, NULL_WORD); 537 jccb(Assembler::zero, not_preempted); 538 movptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD); 539 jmp(rscratch1); 540 bind(not_preempted); 541 } 542 } 543 544 void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) { 545 assert(!src2.is_lval(), "should use cmpptr"); 546 assert(rscratch != noreg || always_reachable(src2), "missing"); 547 548 if (reachable(src2)) { 549 cmpq(src1, as_Address(src2)); 550 } else { 551 lea(rscratch, src2); 552 Assembler::cmpq(src1, Address(rscratch, 0)); 553 } 554 } 555 556 int MacroAssembler::corrected_idivq(Register reg) { 557 // Full implementation of Java ldiv and lrem; checks for special 558 // case as described in JVM spec., p.243 & p.271. The function 559 // returns the (pc) offset of the idivl instruction - may be needed 560 // for implicit exceptions. 
void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}

void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}

void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    incrementq(as_Address(dst));
  } else {
    lea(rscratch, dst);
    incrementq(Address(rscratch, 0));
  }
}

void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}

void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}
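// Note the min_jint special case in the helpers above: negating min_jint
// overflows a 32-bit int (in two's complement, -min_jint == min_jint), so
// that value cannot be forwarded to the opposite helper via -value and is
// handled with a plain add/sub instead.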
// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  lea(rscratch, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch;
  jmp(dispatch);
}

void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  lea(rscratch, adr);
  movptr(dst, rscratch);
}

void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_int8((unsigned char)0xC9); // LEAVE
}

void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(dst, src);
      movq(dst, Address(dst, 0));
    }
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  movq(as_Address(dst, rscratch), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src, dst /*rscratch*/));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  if (is_simm32(src)) {
    movptr(dst, checked_cast<int32_t>(src));
  } else {
    mov64(rscratch, src);
    movq(dst, rscratch);
  }
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  movoop(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  mov_metadata(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  lea(rscratch, src);
  if (src.is_lval()) {
    push(rscratch);
  } else {
    pushq(Address(rscratch, 0));
  }
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  reset_last_Java_frame(r15_thread, clear_fp);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register rscratch) {
  set_last_Java_frame(r15_thread, last_java_sp, last_java_fp, last_java_pc, rscratch);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg ) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg ) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg ) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::stop(const char* msg) {
  if (ShowMessageBoxOnError) {
    address rip = pc();
    pusha(); // get regs on stack
    lea(c_rarg1, InternalAddress(rip));
    movq(c_rarg2, rsp); // pass pointer to regs array
  }
  lea(c_rarg0, ExternalAddress((address) msg));
  andq(rsp, -16); // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, ExternalAddress((address) msg));
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();            // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  PRINT_REG(rsp, (intptr_t)(&regs[16]));
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near the top of the stack.
  int64_t* rsp = &regs[16];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
             src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}
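// A summary sketch of the four cases long_move dispatches on (incoming
// stack offsets are rbp-relative, outgoing ones rsp-relative):
//
//   reg   -> reg    : mov  dst, src
//   reg   -> stack  : movq [rsp + out_off], src
//   stack -> reg    : movq dst, [rbp + in_off]
//   stack -> stack  : movq tmp, [rbp + in_off]; movq [rsp + out_off], tmp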
// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}


// A float arg may have to do float reg int reg conversion
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}

// On 64 bit we will store integer like items to the stack as
// 64 bits items (x86_32/64 abi) even though java would only store
// 32bits for a parameter. On 32bit it will simply be 32 bits
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
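// move32_64 widens with movslq because the Java argument is a 32-bit int
// while the native 64-bit ABI slot is a full word; sign extension keeps
// negative values correct if the callee reads the slot as a wider type.
// For example, an int -1 (0xFFFFFFFF) must arrive as 0xFFFFFFFFFFFFFFFF,
// not 0x00000000FFFFFFFF.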
void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movq(rax, Address(rbp, reg2offset_in(src.first())));
      movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
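// Background for object_move below: native code never receives a raw oop.
// It receives a jobject, i.e. the address of a slot that holds the oop, so
// the GC can relocate the oop by rewriting the slot (which the OopMap entry
// makes visible to it). A null oop is passed as a null handle, which is
// what the conditional moves below arrange.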
// An oop arg. Must pass a handle not the oop itself
void MacroAssembler::object_move(OopMap* map,
                                 int oop_handle_offset,
                                 int framesize_in_slots,
                                 VMRegPair src,
                                 VMRegPair dst,
                                 bool is_receiver,
                                 int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if oop is null if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
    lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a null
    cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmpptr(rOop, NULL_WORD);
    lea(rHandle, Address(rsp, offset));
    // conditionally move a null from the handle area where it was just stored
    cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it otherwise it is already in correct reg.
  if (dst.first()->is_stack()) {
    movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}

#endif // _LP64

// Now versions that are common to 32/64 bit

void MacroAssembler::addptr(Register dst, int32_t imm32) {
  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
}

void MacroAssembler::addptr(Register dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addptr(Address dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    addss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    addss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addpd(dst, Address(rscratch, 0));
  }
}
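// The reachable()/lea() pattern used by addsd/addss/addpd above recurs
// throughout this file: on x86_64 a memory operand is RIP-relative with a
// signed 32-bit displacement, so a literal further than +/-2GB from the
// code cannot be addressed directly. A sketch of the idiom:
//
//   if (reachable(src)) {
//     op(dst, as_Address(src));       // RIP-relative access, no scratch
//   } else {
//     lea(rscratch, src);             // materialize the 64-bit address
//     op(dst, Address(rscratch, 0));  // then go through the register
//   }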
// See 8273459.  Function for ensuring 64-byte alignment, intended for stubs only.
// Stub code is generated once and never copied.
// NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
void MacroAssembler::align64() {
  align(64, (uint)(uintptr_t)pc());
}

void MacroAssembler::align32() {
  align(32, (uint)(uintptr_t)pc());
}

void MacroAssembler::align(uint modulus) {
  // 8273459: Ensure alignment is possible with current segment alignment
  assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
  align(modulus, offset());
}

void MacroAssembler::align(uint modulus, uint target) {
  if (target % modulus != 0) {
    nop(modulus - (target % modulus));
  }
}

void MacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}

void MacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void MacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}

void MacroAssembler::pop_d(XMMRegister r) {
  movdbl(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andpd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andps(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
}

#ifdef _LP64
void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    andq(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    andq(dst, Address(rscratch, 0));
  }
}
#endif

void MacroAssembler::atomic_incl(Address counter_addr) {
  lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incl(Address(rscratch, 0));
  }
}

#ifdef _LP64
void MacroAssembler::atomic_incq(Address counter_addr) {
  lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incq(Address(rscratch, 0));
  }
}
#endif

// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages.  This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-(int)os::vm_page_size())), size );
  subptr(tmp, (int)os::vm_page_size());
  subl(size, (int)os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.)  Skip this address by starting at i=1, and
  // touch a few more pages below.  N.B.  It is important to touch all
  // the way down including all pages in the shadow zone.
  for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
    // this could be any sized move but this can be a debugging crumb
    // so the bigger the better.
    movptr(Address(tmp, (-i*(int)os::vm_page_size())), size );
  }
}
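// Touching one page at a time matters because a single store far below rsp
// could skip over the yellow/red guard pages entirely; page-sized probes
// guarantee that the first faulting access lands inside a guard zone. For
// example, with 4K pages and a 32K shadow zone, the loop and the unrolled
// probes together touch every page from rsp down past size + 32K below it.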
void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(get_thread(rsi);)

  cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
  jcc(Assembler::below, no_reserved_zone_enabling);

  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
  jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);
  setb(Assembler::notZero, x);
}

// Wouldn't need if AddressLiteral version had new name
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}

void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}

void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
  assert(rscratch != noreg || always_reachable(entry), "missing");

  if (reachable(entry)) {
    Assembler::call_literal(entry.target(), entry.rspec());
  } else {
    lea(rscratch, entry);
    Assembler::call(rscratch);
  }
}

void MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
#ifdef _LP64
  // Needs full 64-bit immediate for later patching.
  mov64(rax, (int64_t)Universe::non_oop_word());
#else
  movptr(rax, (intptr_t)Universe::non_oop_word());
#endif
  call(AddressLiteral(entry, rh));
}

int MacroAssembler::ic_check_size() {
  return LP64_ONLY(14) NOT_LP64(12);
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
  Register data = rax;
  Register temp = LP64_ONLY(rscratch1) NOT_LP64(rbx);

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompressedClassPointers) {
    movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  } else {
    movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
  }

  // if inline cache check fails, then jump to runtime routine
  jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}
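// What ic_check emits, conceptually: the unverified entry point (UEP) loads
// the receiver's klass and compares it against the klass speculated in the
// CompiledICData; on mismatch it jumps to the IC-miss stub, and fall-through
// is the verified entry point (VEP), aligned to end_alignment:
//
//   if (receiver->klass() != ic_data->speculated_klass())
//     goto ic_miss_stub;
//   // VEP: execution continues here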
void MacroAssembler::emit_static_call_stub() {
  // Static stub relocation also tags the Method* in the code-stream.
  mov_metadata(rbx, (Metadata*) nullptr);  // Method is zapped till fixup time.
  // This is recognized as unresolved by relocs/nativeinst/ic code.
  jump(RuntimeAddress(pc()));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));

  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   int number_of_arguments,
                                   bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   bool check_exceptions) {
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   bool check_exceptions) {

  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   Register arg_3,
                                   bool check_exceptions) {
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifdef _LP64
    java_thread = r15_thread;
#else
    java_thread = rdi;
    get_thread(java_thread);
#endif // LP64
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // r12 is the heapbase.
  LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
#endif // ASSERT

  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  NOT_LP64(push(java_thread); number_of_arguments++);
  LP64_ONLY(mov(c_rarg0, r15_thread));

  // set last Java frame before call
  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(java_thread, last_java_sp, rbp, nullptr, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

  // restore the thread (cannot use the pushed argument since arguments
  // may be overwritten by C code generated by an optimizing compiler);
  // however can use the register value directly if it is callee saved.
  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
    // rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
    guarantee(java_thread != rax, "change this code");
    push(rax);
    { Label L;
      get_thread(rax);
      cmpptr(java_thread, rax);
      jcc(Assembler::equal, L);
      STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
      bind(L);
    }
    pop(rax);
#endif
  } else {
    get_thread(java_thread);
  }
  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(java_thread, true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
#ifndef _LP64
    jump_cc(Assembler::notEqual,
            RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
#endif // LP64
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {

  // Calculate the value for last_Java_sp
  // somewhat subtle. call_VM does an intermediate call
  // which places a return address on the stack just under the
  // stack pointer as the user finished with it. This allows
  // us to retrieve last_Java_pc from last_Java_sp[-1].
  // On 32bit we then have to push additional args on the stack to accomplish
  // the actual requested call. On 64bit call_VM only can use register args
  // so the only extra space is the return address that call_VM created.
  // This hopefully explains the calculations here.

#ifdef _LP64
  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));
#else
  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
#endif // LP64

  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
}
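// A sketch of the stack when call_VM_helper computes last_Java_sp (64-bit
// case): the intermediate call in call_VM has pushed exactly one word,
//
//   [ return pc   ] <- rsp
//   [ caller data ] <- rsp + wordSize == last_Java_sp
//
// so last_Java_pc is recoverable as last_Java_sp[-1]. On 32-bit the pushed
// register arguments sit below last_Java_sp as well, hence the
// (1 + number_of_arguments) scaling.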
void MacroAssembler::call_VM_leaf0(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 0);
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  // the count tells call_VM_leaf_base how many pushed argument words to pop
  // after the call on 32-bit, so all four arguments must be accounted for
  call_VM_leaf(entry_point, 4);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 4);
}

void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
}

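// Editorial illustration (a sketch, not from the original source): the
// hand-off protocol served by the two accessors above. A runtime entry that
// produces an oop stores it in JavaThread::_vm_result before returning;
// call_VM_base then fetches and clears the slot, e.g.
//
//   __ call_VM(rax, CAST_FROM_FN_PTR(address, some_oop_returning_entry), rbx);  // hypothetical entry
//
// On return rax holds the oop and the thread-local vm_result slot has been
// reset to NULL_WORD, so a stale oop can never be picked up by a later call.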
1774 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { 1775 } 1776 1777 void MacroAssembler::check_and_handle_popframe(Register java_thread) { 1778 } 1779 1780 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) { 1781 assert(rscratch != noreg || always_reachable(src1), "missing"); 1782 1783 if (reachable(src1)) { 1784 cmpl(as_Address(src1), imm); 1785 } else { 1786 lea(rscratch, src1); 1787 cmpl(Address(rscratch, 0), imm); 1788 } 1789 } 1790 1791 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) { 1792 assert(!src2.is_lval(), "use cmpptr"); 1793 assert(rscratch != noreg || always_reachable(src2), "missing"); 1794 1795 if (reachable(src2)) { 1796 cmpl(src1, as_Address(src2)); 1797 } else { 1798 lea(rscratch, src2); 1799 cmpl(src1, Address(rscratch, 0)); 1800 } 1801 } 1802 1803 void MacroAssembler::cmp32(Register src1, int32_t imm) { 1804 Assembler::cmpl(src1, imm); 1805 } 1806 1807 void MacroAssembler::cmp32(Register src1, Address src2) { 1808 Assembler::cmpl(src1, src2); 1809 } 1810 1811 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1812 ucomisd(opr1, opr2); 1813 1814 Label L; 1815 if (unordered_is_less) { 1816 movl(dst, -1); 1817 jcc(Assembler::parity, L); 1818 jcc(Assembler::below , L); 1819 movl(dst, 0); 1820 jcc(Assembler::equal , L); 1821 increment(dst); 1822 } else { // unordered is greater 1823 movl(dst, 1); 1824 jcc(Assembler::parity, L); 1825 jcc(Assembler::above , L); 1826 movl(dst, 0); 1827 jcc(Assembler::equal , L); 1828 decrementl(dst); 1829 } 1830 bind(L); 1831 } 1832 1833 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1834 ucomiss(opr1, opr2); 1835 1836 Label L; 1837 if (unordered_is_less) { 1838 movl(dst, -1); 1839 jcc(Assembler::parity, L); 1840 jcc(Assembler::below , L); 1841 movl(dst, 0); 1842 jcc(Assembler::equal , L); 1843 increment(dst); 1844 } else { // unordered is greater 1845 movl(dst, 1); 1846 jcc(Assembler::parity, L); 1847 jcc(Assembler::above , L); 1848 movl(dst, 0); 1849 jcc(Assembler::equal , L); 1850 decrementl(dst); 1851 } 1852 bind(L); 1853 } 1854 1855 1856 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) { 1857 assert(rscratch != noreg || always_reachable(src1), "missing"); 1858 1859 if (reachable(src1)) { 1860 cmpb(as_Address(src1), imm); 1861 } else { 1862 lea(rscratch, src1); 1863 cmpb(Address(rscratch, 0), imm); 1864 } 1865 } 1866 1867 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) { 1868 #ifdef _LP64 1869 assert(rscratch != noreg || always_reachable(src2), "missing"); 1870 1871 if (src2.is_lval()) { 1872 movptr(rscratch, src2); 1873 Assembler::cmpq(src1, rscratch); 1874 } else if (reachable(src2)) { 1875 cmpq(src1, as_Address(src2)); 1876 } else { 1877 lea(rscratch, src2); 1878 Assembler::cmpq(src1, Address(rscratch, 0)); 1879 } 1880 #else 1881 assert(rscratch == noreg, "not needed"); 1882 if (src2.is_lval()) { 1883 cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); 1884 } else { 1885 cmpl(src1, as_Address(src2)); 1886 } 1887 #endif // _LP64 1888 } 1889 1890 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) { 1891 assert(src2.is_lval(), "not a mem-mem compare"); 1892 #ifdef _LP64 1893 // moves src2's literal address 1894 movptr(rscratch, src2); 1895 Assembler::cmpq(src1, rscratch); 1896 #else 1897 assert(rscratch == noreg, "not needed"); 1898 
cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); 1899 #endif // _LP64 1900 } 1901 1902 void MacroAssembler::cmpoop(Register src1, Register src2) { 1903 cmpptr(src1, src2); 1904 } 1905 1906 void MacroAssembler::cmpoop(Register src1, Address src2) { 1907 cmpptr(src1, src2); 1908 } 1909 1910 #ifdef _LP64 1911 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) { 1912 movoop(rscratch, src2); 1913 cmpptr(src1, rscratch); 1914 } 1915 #endif 1916 1917 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) { 1918 assert(rscratch != noreg || always_reachable(adr), "missing"); 1919 1920 if (reachable(adr)) { 1921 lock(); 1922 cmpxchgptr(reg, as_Address(adr)); 1923 } else { 1924 lea(rscratch, adr); 1925 lock(); 1926 cmpxchgptr(reg, Address(rscratch, 0)); 1927 } 1928 } 1929 1930 void MacroAssembler::cmpxchgptr(Register reg, Address adr) { 1931 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr)); 1932 } 1933 1934 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1935 assert(rscratch != noreg || always_reachable(src), "missing"); 1936 1937 if (reachable(src)) { 1938 Assembler::comisd(dst, as_Address(src)); 1939 } else { 1940 lea(rscratch, src); 1941 Assembler::comisd(dst, Address(rscratch, 0)); 1942 } 1943 } 1944 1945 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 1946 assert(rscratch != noreg || always_reachable(src), "missing"); 1947 1948 if (reachable(src)) { 1949 Assembler::comiss(dst, as_Address(src)); 1950 } else { 1951 lea(rscratch, src); 1952 Assembler::comiss(dst, Address(rscratch, 0)); 1953 } 1954 } 1955 1956 1957 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) { 1958 assert(rscratch != noreg || always_reachable(counter_addr), "missing"); 1959 1960 Condition negated_cond = negate_condition(cond); 1961 Label L; 1962 jcc(negated_cond, L); 1963 pushf(); // Preserve flags 1964 atomic_incl(counter_addr, rscratch); 1965 popf(); 1966 bind(L); 1967 } 1968 1969 int MacroAssembler::corrected_idivl(Register reg) { 1970 // Full implementation of Java idiv and irem; checks for 1971 // special case as described in JVM spec., p.243 & p.271. 1972 // The function returns the (pc) offset of the idivl 1973 // instruction - may be needed for implicit exceptions. 
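// (Editorial note, not from the original source: the special case exists
// because the hardware sequence cdq; idiv raises #DE not only for a zero
// divisor but also for min_int / -1, whose quotient 2^31 does not fit in a
// signed 32-bit register, while the JVM spec requires min_int / -1 == min_int
// with a remainder of 0.)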
  //
  //  normal case                            special case
  //
  //  input : rax: dividend                           min_int
  //          reg: divisor (may not be rax/rdx)       -1
  //
  //  output: rax: quotient  (= rax idiv reg)         min_int
  //          rdx: remainder (= rax irem reg)         0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  const int min_int = 0x80000000;
  Label normal_case, special_case;

  // check for special case
  cmpl(rax, min_int);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
  cmpl(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdql();
  int idivl_offset = offset();
  idivl(reg);

  // normal and special case exit
  bind(special_case);

  return idivl_offset;
}



void MacroAssembler::decrementl(Register reg, int value) {
  if (value == min_jint) {subl(reg, value) ; return; }
  if (value <  0) { incrementl(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decl(reg) ; return; }
  /* else */      { subl(reg, value)       ; return; }
}

void MacroAssembler::decrementl(Address dst, int value) {
  if (value == min_jint) {subl(dst, value) ; return; }
  if (value <  0) { incrementl(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decl(dst) ; return; }
  /* else */      { subl(dst, value)       ; return; }
}

void MacroAssembler::division_with_shift(Register reg, int shift_value) {
  assert(shift_value > 0, "illegal shift value");
  Label _is_positive;
  testl(reg, reg);
  jcc(Assembler::positive, _is_positive);
  int offset = (1 << shift_value) - 1;

  if (offset == 1) {
    incrementl(reg);
  } else {
    addl(reg, offset);
  }

  bind(_is_positive);
  sarl(reg, shift_value);
}

void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::divsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::divsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::divss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::divss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::enter() {
  push(rbp);
  mov(rbp, rsp);
}

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  emit_int8((uint8_t)0x0f);
  emit_int8((uint8_t)0x1f);
  emit_int8((uint8_t)0x84);
  emit_int8((uint8_t)0x00);
  emit_int32(0x00);
}

// A 5-byte nop that is safe for patching (see patch_verified_entry)
void MacroAssembler::fat_nop() {
  if (UseAddressNop) {
    addr_nop_5();
  } else {
    emit_int8((uint8_t)0x26); // es:
    emit_int8((uint8_t)0x2e); // cs:
    emit_int8((uint8_t)0x64); // fs:
    emit_int8((uint8_t)0x65); // gs:
    emit_int8((uint8_t)0x90);
  }
}

#ifndef _LP64
void
MacroAssembler::fcmp(Register tmp) { 2096 fcmp(tmp, 1, true, true); 2097 } 2098 2099 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) { 2100 assert(!pop_right || pop_left, "usage error"); 2101 if (VM_Version::supports_cmov()) { 2102 assert(tmp == noreg, "unneeded temp"); 2103 if (pop_left) { 2104 fucomip(index); 2105 } else { 2106 fucomi(index); 2107 } 2108 if (pop_right) { 2109 fpop(); 2110 } 2111 } else { 2112 assert(tmp != noreg, "need temp"); 2113 if (pop_left) { 2114 if (pop_right) { 2115 fcompp(); 2116 } else { 2117 fcomp(index); 2118 } 2119 } else { 2120 fcom(index); 2121 } 2122 // convert FPU condition into eflags condition via rax, 2123 save_rax(tmp); 2124 fwait(); fnstsw_ax(); 2125 sahf(); 2126 restore_rax(tmp); 2127 } 2128 // condition codes set as follows: 2129 // 2130 // CF (corresponds to C0) if x < y 2131 // PF (corresponds to C2) if unordered 2132 // ZF (corresponds to C3) if x = y 2133 } 2134 2135 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) { 2136 fcmp2int(dst, unordered_is_less, 1, true, true); 2137 } 2138 2139 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) { 2140 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right); 2141 Label L; 2142 if (unordered_is_less) { 2143 movl(dst, -1); 2144 jcc(Assembler::parity, L); 2145 jcc(Assembler::below , L); 2146 movl(dst, 0); 2147 jcc(Assembler::equal , L); 2148 increment(dst); 2149 } else { // unordered is greater 2150 movl(dst, 1); 2151 jcc(Assembler::parity, L); 2152 jcc(Assembler::above , L); 2153 movl(dst, 0); 2154 jcc(Assembler::equal , L); 2155 decrementl(dst); 2156 } 2157 bind(L); 2158 } 2159 2160 void MacroAssembler::fld_d(AddressLiteral src) { 2161 fld_d(as_Address(src)); 2162 } 2163 2164 void MacroAssembler::fld_s(AddressLiteral src) { 2165 fld_s(as_Address(src)); 2166 } 2167 2168 void MacroAssembler::fldcw(AddressLiteral src) { 2169 fldcw(as_Address(src)); 2170 } 2171 2172 void MacroAssembler::fpop() { 2173 ffree(); 2174 fincstp(); 2175 } 2176 2177 void MacroAssembler::fremr(Register tmp) { 2178 save_rax(tmp); 2179 { Label L; 2180 bind(L); 2181 fprem(); 2182 fwait(); fnstsw_ax(); 2183 sahf(); 2184 jcc(Assembler::parity, L); 2185 } 2186 restore_rax(tmp); 2187 // Result is in ST0. 
2188 // Note: fxch & fpop to get rid of ST1 2189 // (otherwise FPU stack could overflow eventually) 2190 fxch(1); 2191 fpop(); 2192 } 2193 2194 void MacroAssembler::empty_FPU_stack() { 2195 if (VM_Version::supports_mmx()) { 2196 emms(); 2197 } else { 2198 for (int i = 8; i-- > 0; ) ffree(i); 2199 } 2200 } 2201 #endif // !LP64 2202 2203 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2204 assert(rscratch != noreg || always_reachable(src), "missing"); 2205 if (reachable(src)) { 2206 Assembler::mulpd(dst, as_Address(src)); 2207 } else { 2208 lea(rscratch, src); 2209 Assembler::mulpd(dst, Address(rscratch, 0)); 2210 } 2211 } 2212 2213 void MacroAssembler::load_float(Address src) { 2214 #ifdef _LP64 2215 movflt(xmm0, src); 2216 #else 2217 if (UseSSE >= 1) { 2218 movflt(xmm0, src); 2219 } else { 2220 fld_s(src); 2221 } 2222 #endif // LP64 2223 } 2224 2225 void MacroAssembler::store_float(Address dst) { 2226 #ifdef _LP64 2227 movflt(dst, xmm0); 2228 #else 2229 if (UseSSE >= 1) { 2230 movflt(dst, xmm0); 2231 } else { 2232 fstp_s(dst); 2233 } 2234 #endif // LP64 2235 } 2236 2237 void MacroAssembler::load_double(Address src) { 2238 #ifdef _LP64 2239 movdbl(xmm0, src); 2240 #else 2241 if (UseSSE >= 2) { 2242 movdbl(xmm0, src); 2243 } else { 2244 fld_d(src); 2245 } 2246 #endif // LP64 2247 } 2248 2249 void MacroAssembler::store_double(Address dst) { 2250 #ifdef _LP64 2251 movdbl(dst, xmm0); 2252 #else 2253 if (UseSSE >= 2) { 2254 movdbl(dst, xmm0); 2255 } else { 2256 fstp_d(dst); 2257 } 2258 #endif // LP64 2259 } 2260 2261 // dst = c = a * b + c 2262 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 2263 Assembler::vfmadd231sd(c, a, b); 2264 if (dst != c) { 2265 movdbl(dst, c); 2266 } 2267 } 2268 2269 // dst = c = a * b + c 2270 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 2271 Assembler::vfmadd231ss(c, a, b); 2272 if (dst != c) { 2273 movflt(dst, c); 2274 } 2275 } 2276 2277 // dst = c = a * b + c 2278 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 2279 Assembler::vfmadd231pd(c, a, b, vector_len); 2280 if (dst != c) { 2281 vmovdqu(dst, c); 2282 } 2283 } 2284 2285 // dst = c = a * b + c 2286 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 2287 Assembler::vfmadd231ps(c, a, b, vector_len); 2288 if (dst != c) { 2289 vmovdqu(dst, c); 2290 } 2291 } 2292 2293 // dst = c = a * b + c 2294 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 2295 Assembler::vfmadd231pd(c, a, b, vector_len); 2296 if (dst != c) { 2297 vmovdqu(dst, c); 2298 } 2299 } 2300 2301 // dst = c = a * b + c 2302 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 2303 Assembler::vfmadd231ps(c, a, b, vector_len); 2304 if (dst != c) { 2305 vmovdqu(dst, c); 2306 } 2307 } 2308 2309 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) { 2310 assert(rscratch != noreg || always_reachable(dst), "missing"); 2311 2312 if (reachable(dst)) { 2313 incrementl(as_Address(dst)); 2314 } else { 2315 lea(rscratch, dst); 2316 incrementl(Address(rscratch, 0)); 2317 } 2318 } 2319 2320 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) { 2321 incrementl(as_Address(dst, rscratch)); 2322 } 2323 2324 void MacroAssembler::incrementl(Register reg, int value) { 2325 if (value == min_jint) 
{addl(reg, value) ; return; } 2326 if (value < 0) { decrementl(reg, -value); return; } 2327 if (value == 0) { ; return; } 2328 if (value == 1 && UseIncDec) { incl(reg) ; return; } 2329 /* else */ { addl(reg, value) ; return; } 2330 } 2331 2332 void MacroAssembler::incrementl(Address dst, int value) { 2333 if (value == min_jint) {addl(dst, value) ; return; } 2334 if (value < 0) { decrementl(dst, -value); return; } 2335 if (value == 0) { ; return; } 2336 if (value == 1 && UseIncDec) { incl(dst) ; return; } 2337 /* else */ { addl(dst, value) ; return; } 2338 } 2339 2340 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) { 2341 assert(rscratch != noreg || always_reachable(dst), "missing"); 2342 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump"); 2343 if (reachable(dst)) { 2344 jmp_literal(dst.target(), dst.rspec()); 2345 } else { 2346 lea(rscratch, dst); 2347 jmp(rscratch); 2348 } 2349 } 2350 2351 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) { 2352 assert(rscratch != noreg || always_reachable(dst), "missing"); 2353 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump_cc"); 2354 if (reachable(dst)) { 2355 InstructionMark im(this); 2356 relocate(dst.reloc()); 2357 const int short_size = 2; 2358 const int long_size = 6; 2359 int offs = (intptr_t)dst.target() - ((intptr_t)pc()); 2360 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) { 2361 // 0111 tttn #8-bit disp 2362 emit_int8(0x70 | cc); 2363 emit_int8((offs - short_size) & 0xFF); 2364 } else { 2365 // 0000 1111 1000 tttn #32-bit disp 2366 emit_int8(0x0F); 2367 emit_int8((unsigned char)(0x80 | cc)); 2368 emit_int32(offs - long_size); 2369 } 2370 } else { 2371 #ifdef ASSERT 2372 warning("reversing conditional branch"); 2373 #endif /* ASSERT */ 2374 Label skip; 2375 jccb(reverse[cc], skip); 2376 lea(rscratch, dst); 2377 Assembler::jmp(rscratch); 2378 bind(skip); 2379 } 2380 } 2381 2382 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) { 2383 assert(rscratch != noreg || always_reachable(src), "missing"); 2384 2385 if (reachable(src)) { 2386 Assembler::ldmxcsr(as_Address(src)); 2387 } else { 2388 lea(rscratch, src); 2389 Assembler::ldmxcsr(Address(rscratch, 0)); 2390 } 2391 } 2392 2393 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2394 int off; 2395 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 2396 off = offset(); 2397 movsbl(dst, src); // movsxb 2398 } else { 2399 off = load_unsigned_byte(dst, src); 2400 shll(dst, 24); 2401 sarl(dst, 24); 2402 } 2403 return off; 2404 } 2405 2406 // Note: load_signed_short used to be called load_signed_word. 2407 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler 2408 // manual, which means 16 bits, that usage is found nowhere in HotSpot code. 2409 // The term "word" in HotSpot means a 32- or 64-bit machine word. 2410 int MacroAssembler::load_signed_short(Register dst, Address src) { 2411 int off; 2412 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 2413 // This is dubious to me since it seems safe to do a signed 16 => 64 bit 2414 // version but this is what 64bit has always done. This seems to imply 2415 // that users are only using 32bits worth. 
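    // (Editorial note, not from the original source: movswl sign-extends
    // 16 -> 32 bits, and on 64-bit writing the 32-bit destination implicitly
    // zero-extends into bits 63:32, so a negative short does not arrive
    // sign-extended through the upper half of the register.)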
2416 off = offset(); 2417 movswl(dst, src); // movsxw 2418 } else { 2419 off = load_unsigned_short(dst, src); 2420 shll(dst, 16); 2421 sarl(dst, 16); 2422 } 2423 return off; 2424 } 2425 2426 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2427 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 2428 // and "3.9 Partial Register Penalties", p. 22). 2429 int off; 2430 if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) { 2431 off = offset(); 2432 movzbl(dst, src); // movzxb 2433 } else { 2434 xorl(dst, dst); 2435 off = offset(); 2436 movb(dst, src); 2437 } 2438 return off; 2439 } 2440 2441 // Note: load_unsigned_short used to be called load_unsigned_word. 2442 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2443 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 2444 // and "3.9 Partial Register Penalties", p. 22). 2445 int off; 2446 if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) { 2447 off = offset(); 2448 movzwl(dst, src); // movzxw 2449 } else { 2450 xorl(dst, dst); 2451 off = offset(); 2452 movw(dst, src); 2453 } 2454 return off; 2455 } 2456 2457 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { 2458 switch (size_in_bytes) { 2459 #ifndef _LP64 2460 case 8: 2461 assert(dst2 != noreg, "second dest register required"); 2462 movl(dst, src); 2463 movl(dst2, src.plus_disp(BytesPerInt)); 2464 break; 2465 #else 2466 case 8: movq(dst, src); break; 2467 #endif 2468 case 4: movl(dst, src); break; 2469 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2470 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2471 default: ShouldNotReachHere(); 2472 } 2473 } 2474 2475 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { 2476 switch (size_in_bytes) { 2477 #ifndef _LP64 2478 case 8: 2479 assert(src2 != noreg, "second source register required"); 2480 movl(dst, src); 2481 movl(dst.plus_disp(BytesPerInt), src2); 2482 break; 2483 #else 2484 case 8: movq(dst, src); break; 2485 #endif 2486 case 4: movl(dst, src); break; 2487 case 2: movw(dst, src); break; 2488 case 1: movb(dst, src); break; 2489 default: ShouldNotReachHere(); 2490 } 2491 } 2492 2493 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) { 2494 assert(rscratch != noreg || always_reachable(dst), "missing"); 2495 2496 if (reachable(dst)) { 2497 movl(as_Address(dst), src); 2498 } else { 2499 lea(rscratch, dst); 2500 movl(Address(rscratch, 0), src); 2501 } 2502 } 2503 2504 void MacroAssembler::mov32(Register dst, AddressLiteral src) { 2505 if (reachable(src)) { 2506 movl(dst, as_Address(src)); 2507 } else { 2508 lea(dst, src); 2509 movl(dst, Address(dst, 0)); 2510 } 2511 } 2512 2513 // C++ bool manipulation 2514 2515 void MacroAssembler::movbool(Register dst, Address src) { 2516 if(sizeof(bool) == 1) 2517 movb(dst, src); 2518 else if(sizeof(bool) == 2) 2519 movw(dst, src); 2520 else if(sizeof(bool) == 4) 2521 movl(dst, src); 2522 else 2523 // unsupported 2524 ShouldNotReachHere(); 2525 } 2526 2527 void MacroAssembler::movbool(Address dst, bool boolconst) { 2528 if(sizeof(bool) == 1) 2529 movb(dst, (int) boolconst); 2530 else if(sizeof(bool) == 2) 2531 movw(dst, (int) boolconst); 2532 else if(sizeof(bool) == 4) 2533 movl(dst, (int) boolconst); 2534 else 2535 // unsupported 2536 ShouldNotReachHere(); 2537 } 2538 2539 void 
MacroAssembler::movbool(Address dst, Register src) { 2540 if(sizeof(bool) == 1) 2541 movb(dst, src); 2542 else if(sizeof(bool) == 2) 2543 movw(dst, src); 2544 else if(sizeof(bool) == 4) 2545 movl(dst, src); 2546 else 2547 // unsupported 2548 ShouldNotReachHere(); 2549 } 2550 2551 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) { 2552 assert(rscratch != noreg || always_reachable(src), "missing"); 2553 2554 if (reachable(src)) { 2555 movdl(dst, as_Address(src)); 2556 } else { 2557 lea(rscratch, src); 2558 movdl(dst, Address(rscratch, 0)); 2559 } 2560 } 2561 2562 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) { 2563 assert(rscratch != noreg || always_reachable(src), "missing"); 2564 2565 if (reachable(src)) { 2566 movq(dst, as_Address(src)); 2567 } else { 2568 lea(rscratch, src); 2569 movq(dst, Address(rscratch, 0)); 2570 } 2571 } 2572 2573 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) { 2574 assert(rscratch != noreg || always_reachable(src), "missing"); 2575 2576 if (reachable(src)) { 2577 if (UseXmmLoadAndClearUpper) { 2578 movsd (dst, as_Address(src)); 2579 } else { 2580 movlpd(dst, as_Address(src)); 2581 } 2582 } else { 2583 lea(rscratch, src); 2584 if (UseXmmLoadAndClearUpper) { 2585 movsd (dst, Address(rscratch, 0)); 2586 } else { 2587 movlpd(dst, Address(rscratch, 0)); 2588 } 2589 } 2590 } 2591 2592 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) { 2593 assert(rscratch != noreg || always_reachable(src), "missing"); 2594 2595 if (reachable(src)) { 2596 movss(dst, as_Address(src)); 2597 } else { 2598 lea(rscratch, src); 2599 movss(dst, Address(rscratch, 0)); 2600 } 2601 } 2602 2603 void MacroAssembler::movptr(Register dst, Register src) { 2604 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2605 } 2606 2607 void MacroAssembler::movptr(Register dst, Address src) { 2608 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2609 } 2610 2611 // src should NEVER be a real pointer. 
Use AddressLiteral for true pointers 2612 void MacroAssembler::movptr(Register dst, intptr_t src) { 2613 #ifdef _LP64 2614 if (is_uimm32(src)) { 2615 movl(dst, checked_cast<uint32_t>(src)); 2616 } else if (is_simm32(src)) { 2617 movq(dst, checked_cast<int32_t>(src)); 2618 } else { 2619 mov64(dst, src); 2620 } 2621 #else 2622 movl(dst, src); 2623 #endif 2624 } 2625 2626 void MacroAssembler::movptr(Address dst, Register src) { 2627 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2628 } 2629 2630 void MacroAssembler::movptr(Address dst, int32_t src) { 2631 LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); 2632 } 2633 2634 void MacroAssembler::movdqu(Address dst, XMMRegister src) { 2635 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2636 Assembler::movdqu(dst, src); 2637 } 2638 2639 void MacroAssembler::movdqu(XMMRegister dst, Address src) { 2640 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2641 Assembler::movdqu(dst, src); 2642 } 2643 2644 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) { 2645 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2646 Assembler::movdqu(dst, src); 2647 } 2648 2649 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2650 assert(rscratch != noreg || always_reachable(src), "missing"); 2651 2652 if (reachable(src)) { 2653 movdqu(dst, as_Address(src)); 2654 } else { 2655 lea(rscratch, src); 2656 movdqu(dst, Address(rscratch, 0)); 2657 } 2658 } 2659 2660 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) { 2661 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2662 Assembler::vmovdqu(dst, src); 2663 } 2664 2665 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) { 2666 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2667 Assembler::vmovdqu(dst, src); 2668 } 2669 2670 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2671 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2672 Assembler::vmovdqu(dst, src); 2673 } 2674 2675 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2676 assert(rscratch != noreg || always_reachable(src), "missing"); 2677 2678 if (reachable(src)) { 2679 vmovdqu(dst, as_Address(src)); 2680 } 2681 else { 2682 lea(rscratch, src); 2683 vmovdqu(dst, Address(rscratch, 0)); 2684 } 2685 } 2686 2687 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2688 assert(rscratch != noreg || always_reachable(src), "missing"); 2689 2690 if (vector_len == AVX_512bit) { 2691 evmovdquq(dst, src, AVX_512bit, rscratch); 2692 } else if (vector_len == AVX_256bit) { 2693 vmovdqu(dst, src, rscratch); 2694 } else { 2695 movdqu(dst, src, rscratch); 2696 } 2697 } 2698 2699 void MacroAssembler::kmov(KRegister dst, Address src) { 2700 if (VM_Version::supports_avx512bw()) { 2701 kmovql(dst, src); 2702 } else { 2703 assert(VM_Version::supports_evex(), ""); 2704 kmovwl(dst, src); 2705 } 2706 } 2707 2708 void MacroAssembler::kmov(Address dst, KRegister src) { 2709 if (VM_Version::supports_avx512bw()) { 2710 kmovql(dst, src); 2711 } else { 2712 assert(VM_Version::supports_evex(), ""); 2713 kmovwl(dst, src); 2714 } 2715 } 2716 2717 void MacroAssembler::kmov(KRegister dst, 
KRegister src) { 2718 if (VM_Version::supports_avx512bw()) { 2719 kmovql(dst, src); 2720 } else { 2721 assert(VM_Version::supports_evex(), ""); 2722 kmovwl(dst, src); 2723 } 2724 } 2725 2726 void MacroAssembler::kmov(Register dst, KRegister src) { 2727 if (VM_Version::supports_avx512bw()) { 2728 kmovql(dst, src); 2729 } else { 2730 assert(VM_Version::supports_evex(), ""); 2731 kmovwl(dst, src); 2732 } 2733 } 2734 2735 void MacroAssembler::kmov(KRegister dst, Register src) { 2736 if (VM_Version::supports_avx512bw()) { 2737 kmovql(dst, src); 2738 } else { 2739 assert(VM_Version::supports_evex(), ""); 2740 kmovwl(dst, src); 2741 } 2742 } 2743 2744 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) { 2745 assert(rscratch != noreg || always_reachable(src), "missing"); 2746 2747 if (reachable(src)) { 2748 kmovql(dst, as_Address(src)); 2749 } else { 2750 lea(rscratch, src); 2751 kmovql(dst, Address(rscratch, 0)); 2752 } 2753 } 2754 2755 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) { 2756 assert(rscratch != noreg || always_reachable(src), "missing"); 2757 2758 if (reachable(src)) { 2759 kmovwl(dst, as_Address(src)); 2760 } else { 2761 lea(rscratch, src); 2762 kmovwl(dst, Address(rscratch, 0)); 2763 } 2764 } 2765 2766 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2767 int vector_len, Register rscratch) { 2768 assert(rscratch != noreg || always_reachable(src), "missing"); 2769 2770 if (reachable(src)) { 2771 Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len); 2772 } else { 2773 lea(rscratch, src); 2774 Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len); 2775 } 2776 } 2777 2778 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2779 int vector_len, Register rscratch) { 2780 assert(rscratch != noreg || always_reachable(src), "missing"); 2781 2782 if (reachable(src)) { 2783 Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len); 2784 } else { 2785 lea(rscratch, src); 2786 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len); 2787 } 2788 } 2789 2790 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2791 assert(rscratch != noreg || always_reachable(src), "missing"); 2792 2793 if (reachable(src)) { 2794 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len); 2795 } else { 2796 lea(rscratch, src); 2797 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len); 2798 } 2799 } 2800 2801 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2802 assert(rscratch != noreg || always_reachable(src), "missing"); 2803 2804 if (reachable(src)) { 2805 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len); 2806 } else { 2807 lea(rscratch, src); 2808 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len); 2809 } 2810 } 2811 2812 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2813 assert(rscratch != noreg || always_reachable(src), "missing"); 2814 2815 if (reachable(src)) { 2816 Assembler::evmovdquq(dst, as_Address(src), vector_len); 2817 } else { 2818 lea(rscratch, src); 2819 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len); 2820 } 2821 } 2822 2823 void MacroAssembler::movdqa(XMMRegister dst, 
AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::movdqa(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::movdqa(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::movsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::movsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::movss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::movss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::movddup(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::movddup(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vmovddup(dst, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vmovddup(dst, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::mulsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::mulsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::mulss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::mulss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS null exception if reg is null by
    // accessing M[reg] w/o changing any (non-CC) registers
    // NOTE: cmpl is plenty here to provoke a segv
    cmpptr(rax, Address(reg, 0));
    // Note: should probably use testl(rax, Address(reg, 0));
    //       may be shorter code (however, this version of
    //       testl needs to be implemented first)
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS null exception if reg is null
  }
}

void MacroAssembler::os_breakpoint() {
  // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
  // (e.g., MSVC can't call ps() otherwise)
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}

void MacroAssembler::unimplemented(const char* what) {
  const char* buf = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}

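// Editorial illustration (a sketch, not from the original source): the CPU
// state helpers below are meant to be used as a strictly nested pair around
// code that may clobber arbitrary registers, e.g.
//
//   __ push_CPU_state();   // pushf/pusha plus an FPU/XMM save area
//   // ... call out to the runtime ...
//   __ pop_CPU_state();    // restores everything in exact reverse order
//
// On 64-bit the stack must be 16-byte aligned at the push_CPU_state() call,
// since fxsave/fxrstor require a 16-byte-aligned operand.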
2932 #ifdef _LP64 2933 #define XSTATE_BV 0x200 2934 #endif 2935 2936 void MacroAssembler::pop_CPU_state() { 2937 pop_FPU_state(); 2938 pop_IU_state(); 2939 } 2940 2941 void MacroAssembler::pop_FPU_state() { 2942 #ifndef _LP64 2943 frstor(Address(rsp, 0)); 2944 #else 2945 fxrstor(Address(rsp, 0)); 2946 #endif 2947 addptr(rsp, FPUStateSizeInWords * wordSize); 2948 } 2949 2950 void MacroAssembler::pop_IU_state() { 2951 popa(); 2952 LP64_ONLY(addq(rsp, 8)); 2953 popf(); 2954 } 2955 2956 // Save Integer and Float state 2957 // Warning: Stack must be 16 byte aligned (64bit) 2958 void MacroAssembler::push_CPU_state() { 2959 push_IU_state(); 2960 push_FPU_state(); 2961 } 2962 2963 void MacroAssembler::push_FPU_state() { 2964 subptr(rsp, FPUStateSizeInWords * wordSize); 2965 #ifndef _LP64 2966 fnsave(Address(rsp, 0)); 2967 fwait(); 2968 #else 2969 fxsave(Address(rsp, 0)); 2970 #endif // LP64 2971 } 2972 2973 void MacroAssembler::push_IU_state() { 2974 // Push flags first because pusha kills them 2975 pushf(); 2976 // Make sure rsp stays 16-byte aligned 2977 LP64_ONLY(subq(rsp, 8)); 2978 pusha(); 2979 } 2980 2981 void MacroAssembler::push_cont_fastpath() { 2982 if (!Continuations::enabled()) return; 2983 2984 #ifndef _LP64 2985 Register rthread = rax; 2986 Register rrealsp = rbx; 2987 push(rthread); 2988 push(rrealsp); 2989 2990 get_thread(rthread); 2991 2992 // The code below wants the original RSP. 2993 // Move it back after the pushes above. 2994 movptr(rrealsp, rsp); 2995 addptr(rrealsp, 2*wordSize); 2996 #else 2997 Register rthread = r15_thread; 2998 Register rrealsp = rsp; 2999 #endif 3000 3001 Label done; 3002 cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset())); 3003 jccb(Assembler::belowEqual, done); 3004 movptr(Address(rthread, JavaThread::cont_fastpath_offset()), rrealsp); 3005 bind(done); 3006 3007 #ifndef _LP64 3008 pop(rrealsp); 3009 pop(rthread); 3010 #endif 3011 } 3012 3013 void MacroAssembler::pop_cont_fastpath() { 3014 if (!Continuations::enabled()) return; 3015 3016 #ifndef _LP64 3017 Register rthread = rax; 3018 Register rrealsp = rbx; 3019 push(rthread); 3020 push(rrealsp); 3021 3022 get_thread(rthread); 3023 3024 // The code below wants the original RSP. 3025 // Move it back after the pushes above. 
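  // (Editorial note, not from the original source: 2*wordSize undoes exactly
  // the two pushes of rthread and rrealsp performed above, so rrealsp ends up
  // holding the caller's rsp value for the fastpath comparison below.)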
3026 movptr(rrealsp, rsp); 3027 addptr(rrealsp, 2*wordSize); 3028 #else 3029 Register rthread = r15_thread; 3030 Register rrealsp = rsp; 3031 #endif 3032 3033 Label done; 3034 cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset())); 3035 jccb(Assembler::below, done); 3036 movptr(Address(rthread, JavaThread::cont_fastpath_offset()), 0); 3037 bind(done); 3038 3039 #ifndef _LP64 3040 pop(rrealsp); 3041 pop(rthread); 3042 #endif 3043 } 3044 3045 void MacroAssembler::inc_held_monitor_count() { 3046 #ifndef _LP64 3047 Register thread = rax; 3048 push(thread); 3049 get_thread(thread); 3050 incrementl(Address(thread, JavaThread::held_monitor_count_offset())); 3051 pop(thread); 3052 #else // LP64 3053 incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 3054 #endif 3055 } 3056 3057 void MacroAssembler::dec_held_monitor_count() { 3058 #ifndef _LP64 3059 Register thread = rax; 3060 push(thread); 3061 get_thread(thread); 3062 decrementl(Address(thread, JavaThread::held_monitor_count_offset())); 3063 pop(thread); 3064 #else // LP64 3065 decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 3066 #endif 3067 } 3068 3069 #ifdef ASSERT 3070 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) { 3071 #ifdef _LP64 3072 Label no_cont; 3073 movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset())); 3074 testl(cont, cont); 3075 jcc(Assembler::zero, no_cont); 3076 stop(name); 3077 bind(no_cont); 3078 #else 3079 Unimplemented(); 3080 #endif 3081 } 3082 #endif 3083 3084 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) { // determine java_thread register 3085 if (!java_thread->is_valid()) { 3086 java_thread = rdi; 3087 get_thread(java_thread); 3088 } 3089 // we must set sp to zero to clear frame 3090 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD); 3091 // must clear fp, so that compiled frames are not confused; it is 3092 // possible that we need it only for debugging 3093 if (clear_fp) { 3094 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); 3095 } 3096 // Always clear the pc because it could have been set by make_walkable() 3097 movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); 3098 vzeroupper(); 3099 } 3100 3101 void MacroAssembler::restore_rax(Register tmp) { 3102 if (tmp == noreg) pop(rax); 3103 else if (tmp != rax) mov(rax, tmp); 3104 } 3105 3106 void MacroAssembler::round_to(Register reg, int modulus) { 3107 addptr(reg, modulus - 1); 3108 andptr(reg, -modulus); 3109 } 3110 3111 void MacroAssembler::save_rax(Register tmp) { 3112 if (tmp == noreg) push(rax); 3113 else if (tmp != rax) mov(tmp, rax); 3114 } 3115 3116 void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod) { 3117 if (at_return) { 3118 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore, 3119 // we may safely use rsp instead to perform the stack watermark check. 3120 cmpptr(in_nmethod ? rsp : rbp, Address(thread_reg, JavaThread::polling_word_offset())); 3121 jcc(Assembler::above, slow_path); 3122 return; 3123 } 3124 testb(Address(thread_reg, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit()); 3125 jcc(Assembler::notZero, slow_path); // handshake bit set implies poll 3126 } 3127 3128 // Calls to C land 3129 // 3130 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded 3131 // in the (thread-local) JavaThread object. 
When leaving C land, the last Java fp 3132 // has to be reset to 0. This is required to allow proper stack traversal. 3133 void MacroAssembler::set_last_Java_frame(Register java_thread, 3134 Register last_java_sp, 3135 Register last_java_fp, 3136 address last_java_pc, 3137 Register rscratch) { 3138 vzeroupper(); 3139 // determine java_thread register 3140 if (!java_thread->is_valid()) { 3141 java_thread = rdi; 3142 get_thread(java_thread); 3143 } 3144 // determine last_java_sp register 3145 if (!last_java_sp->is_valid()) { 3146 last_java_sp = rsp; 3147 } 3148 // last_java_fp is optional 3149 if (last_java_fp->is_valid()) { 3150 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp); 3151 } 3152 // last_java_pc is optional 3153 if (last_java_pc != nullptr) { 3154 Address java_pc(java_thread, 3155 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); 3156 lea(java_pc, InternalAddress(last_java_pc), rscratch); 3157 } 3158 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp); 3159 } 3160 3161 void MacroAssembler::shlptr(Register dst, int imm8) { 3162 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8)); 3163 } 3164 3165 void MacroAssembler::shrptr(Register dst, int imm8) { 3166 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8)); 3167 } 3168 3169 void MacroAssembler::sign_extend_byte(Register reg) { 3170 if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) { 3171 movsbl(reg, reg); // movsxb 3172 } else { 3173 shll(reg, 24); 3174 sarl(reg, 24); 3175 } 3176 } 3177 3178 void MacroAssembler::sign_extend_short(Register reg) { 3179 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 3180 movswl(reg, reg); // movsxw 3181 } else { 3182 shll(reg, 16); 3183 sarl(reg, 16); 3184 } 3185 } 3186 3187 void MacroAssembler::testl(Address dst, int32_t imm32) { 3188 if (imm32 >= 0 && is8bit(imm32)) { 3189 testb(dst, imm32); 3190 } else { 3191 Assembler::testl(dst, imm32); 3192 } 3193 } 3194 3195 void MacroAssembler::testl(Register dst, int32_t imm32) { 3196 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) { 3197 testb(dst, imm32); 3198 } else { 3199 Assembler::testl(dst, imm32); 3200 } 3201 } 3202 3203 void MacroAssembler::testl(Register dst, AddressLiteral src) { 3204 assert(always_reachable(src), "Address should be reachable"); 3205 testl(dst, as_Address(src)); 3206 } 3207 3208 #ifdef _LP64 3209 3210 void MacroAssembler::testq(Address dst, int32_t imm32) { 3211 if (imm32 >= 0) { 3212 testl(dst, imm32); 3213 } else { 3214 Assembler::testq(dst, imm32); 3215 } 3216 } 3217 3218 void MacroAssembler::testq(Register dst, int32_t imm32) { 3219 if (imm32 >= 0) { 3220 testl(dst, imm32); 3221 } else { 3222 Assembler::testq(dst, imm32); 3223 } 3224 } 3225 3226 #endif 3227 3228 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) { 3229 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3230 Assembler::pcmpeqb(dst, src); 3231 } 3232 3233 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) { 3234 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3235 Assembler::pcmpeqw(dst, src); 3236 } 3237 3238 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) { 3239 assert((dst->encoding() < 16),"XMM register should be 0-15"); 3240 Assembler::pcmpestri(dst, src, imm8); 3241 } 3242 3243 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int 
imm8) { 3244 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3245 Assembler::pcmpestri(dst, src, imm8); 3246 } 3247 3248 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) { 3249 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3250 Assembler::pmovzxbw(dst, src); 3251 } 3252 3253 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) { 3254 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3255 Assembler::pmovzxbw(dst, src); 3256 } 3257 3258 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) { 3259 assert((src->encoding() < 16),"XMM register should be 0-15"); 3260 Assembler::pmovmskb(dst, src); 3261 } 3262 3263 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) { 3264 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3265 Assembler::ptest(dst, src); 3266 } 3267 3268 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3269 assert(rscratch != noreg || always_reachable(src), "missing"); 3270 3271 if (reachable(src)) { 3272 Assembler::sqrtss(dst, as_Address(src)); 3273 } else { 3274 lea(rscratch, src); 3275 Assembler::sqrtss(dst, Address(rscratch, 0)); 3276 } 3277 } 3278 3279 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3280 assert(rscratch != noreg || always_reachable(src), "missing"); 3281 3282 if (reachable(src)) { 3283 Assembler::subsd(dst, as_Address(src)); 3284 } else { 3285 lea(rscratch, src); 3286 Assembler::subsd(dst, Address(rscratch, 0)); 3287 } 3288 } 3289 3290 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) { 3291 assert(rscratch != noreg || always_reachable(src), "missing"); 3292 3293 if (reachable(src)) { 3294 Assembler::roundsd(dst, as_Address(src), rmode); 3295 } else { 3296 lea(rscratch, src); 3297 Assembler::roundsd(dst, Address(rscratch, 0), rmode); 3298 } 3299 } 3300 3301 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3302 assert(rscratch != noreg || always_reachable(src), "missing"); 3303 3304 if (reachable(src)) { 3305 Assembler::subss(dst, as_Address(src)); 3306 } else { 3307 lea(rscratch, src); 3308 Assembler::subss(dst, Address(rscratch, 0)); 3309 } 3310 } 3311 3312 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3313 assert(rscratch != noreg || always_reachable(src), "missing"); 3314 3315 if (reachable(src)) { 3316 Assembler::ucomisd(dst, as_Address(src)); 3317 } else { 3318 lea(rscratch, src); 3319 Assembler::ucomisd(dst, Address(rscratch, 0)); 3320 } 3321 } 3322 3323 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3324 assert(rscratch != noreg || always_reachable(src), "missing"); 3325 3326 if (reachable(src)) { 3327 Assembler::ucomiss(dst, as_Address(src)); 3328 } else { 3329 lea(rscratch, src); 3330 Assembler::ucomiss(dst, Address(rscratch, 0)); 3331 } 3332 } 3333 3334 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3335 assert(rscratch != noreg || always_reachable(src), "missing"); 3336 3337 // Used in sign-bit flipping with aligned address. 
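  // (Editorial note, not from the original source: the literal here is
  // typically a 16-byte mask of repeated 0x8000000000000000 words, so the
  // xor flips exactly the sign bit of each double lane, negating the packed
  // doubles in place.)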
3338 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 3339 if (reachable(src)) { 3340 Assembler::xorpd(dst, as_Address(src)); 3341 } else { 3342 lea(rscratch, src); 3343 Assembler::xorpd(dst, Address(rscratch, 0)); 3344 } 3345 } 3346 3347 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) { 3348 if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) { 3349 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 3350 } 3351 else { 3352 Assembler::xorpd(dst, src); 3353 } 3354 } 3355 3356 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) { 3357 if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) { 3358 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 3359 } else { 3360 Assembler::xorps(dst, src); 3361 } 3362 } 3363 3364 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) { 3365 assert(rscratch != noreg || always_reachable(src), "missing"); 3366 3367 // Used in sign-bit flipping with aligned address. 3368 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 3369 if (reachable(src)) { 3370 Assembler::xorps(dst, as_Address(src)); 3371 } else { 3372 lea(rscratch, src); 3373 Assembler::xorps(dst, Address(rscratch, 0)); 3374 } 3375 } 3376 3377 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) { 3378 assert(rscratch != noreg || always_reachable(src), "missing"); 3379 3380 // Used in sign-bit flipping with aligned address. 3381 bool aligned_adr = (((intptr_t)src.target() & 15) == 0); 3382 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes"); 3383 if (reachable(src)) { 3384 Assembler::pshufb(dst, as_Address(src)); 3385 } else { 3386 lea(rscratch, src); 3387 Assembler::pshufb(dst, Address(rscratch, 0)); 3388 } 3389 } 3390 3391 // AVX 3-operands instructions 3392 3393 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3394 assert(rscratch != noreg || always_reachable(src), "missing"); 3395 3396 if (reachable(src)) { 3397 vaddsd(dst, nds, as_Address(src)); 3398 } else { 3399 lea(rscratch, src); 3400 vaddsd(dst, nds, Address(rscratch, 0)); 3401 } 3402 } 3403 3404 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3405 assert(rscratch != noreg || always_reachable(src), "missing"); 3406 3407 if (reachable(src)) { 3408 vaddss(dst, nds, as_Address(src)); 3409 } else { 3410 lea(rscratch, src); 3411 vaddss(dst, nds, Address(rscratch, 0)); 3412 } 3413 } 3414 3415 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3416 assert(UseAVX > 0, "requires some form of AVX"); 3417 assert(rscratch != noreg || always_reachable(src), "missing"); 3418 3419 if (reachable(src)) { 3420 Assembler::vpaddb(dst, nds, as_Address(src), vector_len); 3421 } else { 3422 lea(rscratch, src); 3423 Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len); 3424 } 3425 } 3426 3427 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3428 assert(UseAVX > 0, "requires some form of AVX"); 3429 assert(rscratch != noreg || always_reachable(src), "missing"); 3430 3431 if (reachable(src)) { 3432 Assembler::vpaddd(dst, nds, as_Address(src), vector_len); 3433 } else { 3434 lea(rscratch, src); 3435 
// AVX 3-operands instructions

void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vaddsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vaddsd(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vaddss(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vaddss(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(UseAVX > 0, "requires some form of AVX");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpaddb(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(UseAVX > 0, "requires some form of AVX");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpaddd(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()), "XMM register should be 0-15");
  assert(rscratch != noreg || always_reachable(negate_field), "missing");

  vandps(dst, nds, negate_field, vector_len, rscratch);
}

void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()), "XMM register should be 0-15");
  assert(rscratch != noreg || always_reachable(negate_field), "missing");

  vandpd(dst, nds, negate_field, vector_len, rscratch);
}

void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpaddb(dst, nds, src, vector_len);
}

void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpaddb(dst, nds, src, vector_len);
}

void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpaddw(dst, nds, src, vector_len);
}

void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpaddw(dst, nds, src, vector_len);
}

void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpand(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpbroadcastd(dst, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpbroadcastq(dst, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vbroadcastsd(dst, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vbroadcastss(dst, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len);
  }
}

// Vector float blend
// vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
  // WARN: Allow dst == (src1|src2), mask == scratch
  bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1;
  bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst;
  bool dst_available = dst != mask && (dst != src1 || dst != src2);
  if (blend_emulation && scratch_available && dst_available) {
    if (compute_mask) {
      vpsrad(scratch, mask, 32, vector_len);
      mask = scratch;
    }
    if (dst == src1) {
      vpandn(dst,     mask, src1, vector_len); // if mask == 0, src1
      vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
    } else {
      vpand (dst,     mask, src2, vector_len); // if mask == 1, src2
      vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
    }
    vpor(dst, dst, scratch, vector_len);
  } else {
    Assembler::vblendvps(dst, src1, src2, mask, vector_len);
  }
}

// vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg)
void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) {
  // WARN: Allow dst == (src1|src2), mask == scratch
  bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1;
  bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask);
  bool dst_available = dst != mask && (dst != src1 || dst != src2);
  if (blend_emulation && scratch_available && dst_available) {
    if (compute_mask) {
      vpxor(scratch, scratch, scratch, vector_len);
      vpcmpgtq(scratch, scratch, mask, vector_len);
      mask = scratch;
    }
    if (dst == src1) {
      vpandn(dst,     mask, src1, vector_len); // if mask == 0, src1
      vpand (scratch, mask, src2, vector_len); // if mask == 1, src2
    } else {
      vpand (dst,     mask, src2, vector_len); // if mask == 1, src2
      vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1
    }
    vpor(dst, dst, scratch, vector_len);
  } else {
    Assembler::vblendvpd(dst, src1, src2, mask, vector_len);
  }
}
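// A sketch of the identity behind the blend emulation above, assuming each mask
// lane is already all-ones or all-zeros (which the compute_mask step establishes
// from the lane's sign bit):
//
//   dst = (mask & src2) | (~mask & src1)
//
// vpand keeps the src2 lanes selected by the mask, vpandn(dst, mask, src1)
// computes ~mask & src1 for the unselected lanes, and vpor merges the two
// partial results. The two instruction orderings only differ so that a
// dst == src1 aliasing does not clobber src1 before it has been consumed.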
void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpcmpeqb(dst, nds, src, vector_len);
}

void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
  assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpcmpeqb(dst, src1, src2, vector_len);
}

void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpcmpeqw(dst, nds, src, vector_len);
}

void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpcmpeqw(dst, nds, src, vector_len);
}

void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
                             int comparison, bool is_signed, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
  }
}

void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
                             int comparison, bool is_signed, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
  }
}

void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
                             int comparison, bool is_signed, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
  }
}

void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
                             int comparison, bool is_signed, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
  }
}

void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) {
  if (width == Assembler::Q) {
    Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len);
  } else {
    Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len);
  }
}

void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) {
  int eq_cond_enc = 0x29;
  int gt_cond_enc = 0x37;
  if (width != Assembler::Q) {
    eq_cond_enc = 0x74 + width;
    gt_cond_enc = 0x64 + width;
  }
  switch (cond) {
  case eq:
    vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
    break;
  case neq:
    vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len);
    vallones(xtmp, vector_len);
    vpxor(dst, xtmp, dst, vector_len);
    break;
  case le:
    vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
    vallones(xtmp, vector_len);
    vpxor(dst, xtmp, dst, vector_len);
    break;
  case nlt:
    vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
    vallones(xtmp, vector_len);
    vpxor(dst, xtmp, dst, vector_len);
    break;
  case lt:
    vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len);
    break;
  case nle:
    vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len);
    break;
  default:
    assert(false, "Should not reach here");
  }
}
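// The negated predicates in vpcmpCCW above are synthesized from the only two
// primitives the packed-integer compares provide, "equal" and "greater than":
//
//   neq(a, b) = NOT eq(a, b)
//   le (a, b) = NOT gt(a, b)
//   nlt(a, b) = NOT gt(b, a)    // nlt == "greater or equal"
//
// Since there is no vector NOT instruction, the complement is taken by XORing
// the compare result with an all-ones vector produced by vallones().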
void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
  assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpmovzxbw(dst, src, vector_len);
}

void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) {
  assert((src->encoding() < 16), "XMM register should be 0-15");
  Assembler::vpmovmskb(dst, src, vector_len);
}

void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpmullw(dst, nds, src, vector_len);
}

void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpmullw(dst, nds, src, vector_len);
}

void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert((UseAVX > 0), "AVX support is needed");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpmulld(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubb(dst, nds, src, vector_len);
}

void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubb(dst, nds, src, vector_len);
}

void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubw(dst, nds, src, vector_len);
}

void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubw(dst, nds, src, vector_len);
}

void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsraw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsraw(dst, nds, shift, vector_len);
}

void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(UseAVX > 2, "");
  if (!VM_Version::supports_avx512vl() && vector_len < 2) {
    vector_len = 2;
  }
  Assembler::evpsraq(dst, nds, shift, vector_len);
}

void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(UseAVX > 2, "");
  if (!VM_Version::supports_avx512vl() && vector_len < 2) {
    vector_len = 2;
  }
  Assembler::evpsraq(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsrlw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsrlw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsllw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsllw(dst, nds, shift, vector_len);
}

void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) {
  assert((dst->encoding() < 16 && src->encoding() < 16), "XMM register should be 0-15");
  Assembler::vptest(dst, src);
}

void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::punpcklbw(dst, src);
}

void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::pshufd(dst, src, mode);
}

void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::pshuflw(dst, src, mode);
}

void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vandpd(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    vandpd(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vandps(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    vandps(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src,
                            bool merge, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
  }
}

void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vdivsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vdivsd(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vdivss(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vdivss(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vmulsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vmulsd(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vmulss(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vmulss(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vsubsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vsubsd(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vsubss(dst, nds, as_Address(src));
  } else {
    lea(rscratch, src);
    vsubss(dst, nds, Address(rscratch, 0));
  }
}

void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()), "XMM register should be 0-15");
  assert(rscratch != noreg || always_reachable(src), "missing");

  vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch);
}

void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()), "XMM register should be 0-15");
  assert(rscratch != noreg || always_reachable(src), "missing");

  vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch);
}

void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vxorpd(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    vxorpd(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    vxorps(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    vxorps(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (UseAVX > 1 || (vector_len < 1)) {
    if (reachable(src)) {
      Assembler::vpxor(dst, nds, as_Address(src), vector_len);
    } else {
      lea(rscratch, src);
      Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len);
    }
  } else {
    MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch);
  }
}

void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::vpermd(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::clear_jobject_tag(Register possibly_non_local) {
  const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask);
  STATIC_ASSERT(inverted_mask == -4); // otherwise check this code
  // The inverted mask is sign-extended
  andptr(possibly_non_local, inverted_mask);
}
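// For reference, a sketch of the jobject tagging scheme that clear_jobject_tag
// above and the resolve_jobject helpers below rely on (the authoritative
// definitions live in JNIHandles):
//
//   value == 0                          -> null, used as-is
//   (value & tag_mask) == 0             -> local handle, dereferenced raw
//   (value & TypeTag::weak_global) != 0 -> weak global handle
//   otherwise                           -> (strong) global handle
//
// The STATIC_ASSERT above pins the inverted mask to -4, i.e. two tag bits,
// which is what makes the sign-extended andptr correct on both 32 and 64 bit.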
void MacroAssembler::resolve_jobject(Register value,
                                     Register thread,
                                     Register tmp) {
  assert_different_registers(value, thread, tmp);
  Label done, tagged, weak_tagged;

  testptr(value, value);
  jcc(Assembler::zero, done);           // Use null as-is.
  testptr(value, JNIHandles::tag_mask); // Test for tag.
  jcc(Assembler::notZero, tagged);

  // Resolve local handle
  access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp, thread);
  verify_oop(value);
  jmp(done);

  bind(tagged);
  testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag.
  jcc(Assembler::notZero, weak_tagged);

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread);
  verify_oop(value);
  jmp(done);

  bind(weak_tagged);
  // Resolve jweak.
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp, thread);
  verify_oop(value);

  bind(done);
}

void MacroAssembler::resolve_global_jobject(Register value,
                                            Register thread,
                                            Register tmp) {
  assert_different_registers(value, thread, tmp);
  Label done;

  testptr(value, value);
  jcc(Assembler::zero, done); // Use null as-is.

#ifdef ASSERT
  {
    Label valid_global_tag;
    testptr(value, JNIHandles::TypeTag::global); // Test for global tag.
    jcc(Assembler::notZero, valid_global_tag);
    stop("non global jobject using resolve_global_jobject");
    bind(valid_global_tag);
  }
#endif

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread);
  verify_oop(value);

  bind(done);
}

void MacroAssembler::subptr(Register dst, int32_t imm32) {
  LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
  LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
}

void MacroAssembler::subptr(Register dst, Register src) {
  LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
}

// C++ bool manipulation
void MacroAssembler::testbool(Register dst) {
  if (sizeof(bool) == 1) {
    testb(dst, 0xff);
  } else if (sizeof(bool) == 2) {
    // testw implementation needed for two byte bools
    ShouldNotReachHere();
  } else if (sizeof(bool) == 4) {
    testl(dst, dst);
  } else {
    // unsupported
    ShouldNotReachHere();
  }
}

void MacroAssembler::testptr(Register dst, Register src) {
  LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register thread, Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
}

RegSet MacroAssembler::call_clobbered_gp_registers() {
  RegSet regs;
#ifdef _LP64
  regs += RegSet::of(rax, rcx, rdx);
#ifndef _WINDOWS
  regs += RegSet::of(rsi, rdi);
#endif
  regs += RegSet::range(r8, r11);
#else
  regs += RegSet::of(rax, rcx, rdx);
#endif
#ifdef _LP64
  if (UseAPX) {
    regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1));
  }
#endif
  return regs;
}

XMMRegSet MacroAssembler::call_clobbered_xmm_registers() {
  int num_xmm_registers = XMMRegister::available_xmm_registers();
#if defined(_WINDOWS) && defined(_LP64)
  XMMRegSet result = XMMRegSet::range(xmm0, xmm5);
  if (num_xmm_registers > 16) {
    result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1));
  }
  return result;
#else
  return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1));
#endif
}

static int FPUSaveAreaSize = align_up(108, StackAlignmentInBytes); // 108 bytes needed for FPU state by fsave/frstor

#ifndef _LP64
static bool use_x87_registers() { return UseSSE < 2; }
#endif
static bool use_xmm_registers() { return UseSSE >= 1; }

// C1 only ever uses the first double/float of the XMM register.
static int xmm_save_size() { return UseSSE >= 2 ? sizeof(double) : sizeof(float); }

static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
  if (UseSSE == 1) {
    masm->movflt(Address(rsp, offset), reg);
  } else {
    masm->movdbl(Address(rsp, offset), reg);
  }
}

static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) {
  if (UseSSE == 1) {
    masm->movflt(reg, Address(rsp, offset));
  } else {
    masm->movdbl(reg, Address(rsp, offset));
  }
}

static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers,
                                  bool save_fpu, int& gp_area_size,
                                  int& fp_area_size, int& xmm_area_size) {

  gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
                          StackAlignmentInBytes);
#ifdef _LP64
  fp_area_size = 0;
#else
  fp_area_size = (save_fpu && use_x87_registers()) ? FPUSaveAreaSize : 0;
#endif
  xmm_area_size = (save_fpu && use_xmm_registers()) ? xmm_registers.size() * xmm_save_size() : 0;

  return gp_area_size + fp_area_size + xmm_area_size;
}
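// The save area whose size register_section_sizes() computes is laid out from
// low to high addresses as follows (each section independently aligned to
// StackAlignmentInBytes):
//
//   rsp + 0                            : GP registers (gp_area_size bytes)
//   rsp + gp_area_size                 : x87 state, 32-bit with UseSSE < 2 only (fp_area_size bytes)
//   rsp + gp_area_size + fp_area_size  : one float/double per XMM register (xmm_area_size bytes)
//
// These are exactly the offsets used by push/pop_call_clobbered_registers_except below.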
void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) {
  block_comment("push_call_clobbered_registers start");
  // Regular registers
  RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude;

  int gp_area_size;
  int fp_area_size;
  int xmm_area_size;
  int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu,
                                               gp_area_size, fp_area_size, xmm_area_size);
  subptr(rsp, total_save_size);

  push_set(gp_registers_to_push, 0);

#ifndef _LP64
  if (save_fpu && use_x87_registers()) {
    fnsave(Address(rsp, gp_area_size));
    fwait();
  }
#endif
  if (save_fpu && use_xmm_registers()) {
    push_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size);
  }

  block_comment("push_call_clobbered_registers end");
}

void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) {
  block_comment("pop_call_clobbered_registers start");

  RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude;

  int gp_area_size;
  int fp_area_size;
  int xmm_area_size;
  int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu,
                                               gp_area_size, fp_area_size, xmm_area_size);

  if (restore_fpu && use_xmm_registers()) {
    pop_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size);
  }
#ifndef _LP64
  if (restore_fpu && use_x87_registers()) {
    frstor(Address(rsp, gp_area_size));
  }
#endif

  pop_set(gp_registers_to_pop, 0);

  addptr(rsp, total_save_size);

  vzeroupper();

  block_comment("pop_call_clobbered_registers end");
}
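// A minimal usage sketch for the pair above (hypothetical call site, not taken
// from this file): spill everything a C call may clobber, make the call, then
// restore, keeping a result register live by excluding it.
//
//   push_call_clobbered_registers_except(RegSet::of(rax), /* save_fpu */ true);
//   call(RuntimeAddress(CAST_FROM_FN_PTR(address, some_runtime_helper))); // hypothetical target
//   pop_call_clobbered_registers_except(RegSet::of(rax), /* restore_fpu */ true);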
void MacroAssembler::push_set(XMMRegSet set, int offset) {
  assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be");
  int spill_offset = offset;

  for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) {
    save_xmm_register(this, spill_offset, *it);
    spill_offset += xmm_save_size();
  }
}

void MacroAssembler::pop_set(XMMRegSet set, int offset) {
  int restore_size = set.size() * xmm_save_size();
  assert(is_aligned(restore_size, StackAlignmentInBytes), "must be");

  int restore_offset = offset + restore_size - xmm_save_size();

  for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) {
    restore_xmm_register(this, restore_offset, *it);
    restore_offset -= xmm_save_size();
  }
}

void MacroAssembler::push_set(RegSet set, int offset) {
  int spill_offset;
  if (offset == -1) {
    int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
    int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
    subptr(rsp, aligned_size);
    spill_offset = 0;
  } else {
    spill_offset = offset;
  }

  for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) {
    movptr(Address(rsp, spill_offset), *it);
    spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size;
  }
}

void MacroAssembler::pop_set(RegSet set, int offset) {

  int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size;
  int restore_size = set.size() * gp_reg_size;
  int aligned_size = align_up(restore_size, StackAlignmentInBytes);

  int restore_offset;
  if (offset == -1) {
    restore_offset = restore_size - gp_reg_size;
  } else {
    restore_offset = offset + restore_size - gp_reg_size;
  }
  for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) {
    movptr(*it, Address(rsp, restore_offset));
    restore_offset -= gp_reg_size;
  }

  if (offset == -1) {
    addptr(rsp, aligned_size);
  }
}

// Preserves the contents of address, destroys the contents of length_in_bytes and temp.
void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
  assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
  assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
  Label done;

  testptr(length_in_bytes, length_in_bytes);
  jcc(Assembler::zero, done);

  // initialize topmost word, divide index by 2, check if odd and test if zero
  // note: for the remaining code to work, index must be a multiple of BytesPerWord
#ifdef ASSERT
  {
    Label L;
    testptr(length_in_bytes, BytesPerWord - 1);
    jcc(Assembler::zero, L);
    stop("length must be a multiple of BytesPerWord");
    bind(L);
  }
#endif
  Register index = length_in_bytes;
  xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
  if (UseIncDec) {
    shrptr(index, 3);    // divide by 8/16 and set carry flag if bit 2 was set
  } else {
    shrptr(index, 2);    // use 2 instructions to avoid partial flag stall
    shrptr(index, 1);
  }
#ifndef _LP64
  // index could have not been a multiple of 8 (i.e., bit 2 was set)
  {
    Label even;
    // note: if index was a multiple of 8, then it cannot
    //       be 0 now otherwise it must have been 0 before
    //       => if it is even, we don't need to check for 0 again
    jcc(Assembler::carryClear, even);
    // clear topmost word (no jump would be needed if conditional assignment worked here)
    movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
    // index could be 0 now, must check again
    jcc(Assembler::zero, done);
    bind(even);
  }
#endif // !_LP64
  // initialize remaining object fields: index is a multiple of 2 now
  {
    Label loop;
    bind(loop);
    movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
    NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
    decrement(index);
    jcc(Assembler::notZero, loop);
  }

  bind(done);
}
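// Worked example for zero_memory above (64-bit, UseIncDec, offset_in_bytes == 0):
// clearing 24 bytes. shrptr(index, 3) turns 24 into 3 words; each loop iteration
// stores temp (zero) at [address + index*8 - 8], i.e. at offsets 16, 8 and 0 for
// index = 3, 2, 1, and the loop exits once decrement(index) reaches zero. On
// 32-bit the same shift counts word *pairs*, two words are cleared per iteration,
// and the carry flag left by the shift handles an odd word count up front.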
// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");

  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  Address::ScaleFactor times_vte_scale = Address::times_ptr;
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  for (int peel = 1; peel >= 0; peel--) {
    movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
    cmpptr(intf_klass, method_result);

    if (peel) {
      jccb(Assembler::equal, found_method);
    } else {
      jccb(Assembler::notEqual, search);
      // (invert the test to fall through to found_method...)
    }

    if (!peel) break;

    bind(search);

    // Check that the previous entry is non-null. A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    testptr(method_result, method_result);
    jcc(Assembler::zero, L_no_such_interface);
    addptr(scan_temp, scan_step);
  }

  bind(found_method);

  if (return_method) {
    // Got a hit.
    movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
  }
}
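// Layout assumed by the scan above (and by lookup_interface_method_stub below):
// the itable sits directly after the vtable and begins with itableOffsetEntry
// records (an interface Klass* plus the offset of that interface's
// itableMethodEntry array), terminated by a null interface. The found_method
// loads therefore compute, in effect:
//
//   method = *(recv_klass + scan->offset() + itable_index * wordSize + itentry_off)
//
// where itentry_off is the method field's offset within an itableMethodEntry.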
// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register scan_temp,
                                                  Register temp_reg2,
                                                  Register receiver,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver);
  Register temp_itbl_klass = method_result;
  Register temp_reg = (temp_reg2 == noreg ? recv_klass : temp_reg2); // reuse recv_klass register on 32-bit x86 impl

  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());
  Address::ScaleFactor times_vte_scale = Address::times_ptr;
  assert(vte_size == wordSize, "adjust times_vte_scale");

  Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found;

  // temp_itbl_klass = recv_klass.itable[0]
  // scan_temp = &recv_klass.itable[0] + step
  movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset));
  lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step));
  xorptr(temp_reg, temp_reg);

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == 0), no such interface
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  cmpptr(holder_klass, resolved_klass);
  jccb(Assembler::notEqual, L_loop_scan_resolved_entry);
  testptr(temp_itbl_klass, temp_itbl_klass);
  jccb(Assembler::zero, L_no_such_interface);
  cmpptr(holder_klass, temp_itbl_klass);
  jccb(Assembler::equal, L_holder_found);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     tmp = itable[index];
  //     index += step;
  //     if (tmp == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (tmp != 0);
  //   goto L_no_such_interface // Not found.
  Label L_scan_holder;
  bind(L_scan_holder);
  movptr(temp_itbl_klass, Address(scan_temp, 0));
  addptr(scan_temp, scan_step);
  cmpptr(holder_klass, temp_itbl_klass);
  jccb(Assembler::equal, L_holder_found);
  testptr(temp_itbl_klass, temp_itbl_klass);
  jccb(Assembler::notZero, L_scan_holder);

  jmpb(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   do {
  //     tmp = itable[index];
  //     index += step;
  //     if (tmp == holder_klass) {
  //       // Also check if we have met a holder klass
  //       holder_tmp = itable[index-step-ioffset];
  //     }
  //     if (tmp == resolved_klass) {
  //       goto L_resolved_found;  // Found!
  //     }
  //   } while (tmp != 0);
  //   goto L_no_such_interface // Not found.
  //
  Label L_loop_scan_resolved;
  bind(L_loop_scan_resolved);
  movptr(temp_itbl_klass, Address(scan_temp, 0));
  addptr(scan_temp, scan_step);
  bind(L_loop_scan_resolved_entry);
  cmpptr(holder_klass, temp_itbl_klass);
  cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));
  cmpptr(resolved_klass, temp_itbl_klass);
  jccb(Assembler::equal, L_resolved_found);
  testptr(temp_itbl_klass, temp_itbl_klass);
  jccb(Assembler::notZero, L_loop_scan_resolved);

  jmpb(L_no_such_interface);

  Label L_ready;

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  testptr(temp_reg, temp_reg);
  jccb(Assembler::zero, L_scan_holder);
  jmpb(L_ready);

  bind(L_holder_found);
  movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step));

  // Finally, temp_reg contains holder_klass vtable offset
  bind(L_ready);
  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  if (temp_reg2 == noreg) { // recv_klass register is clobbered for 32-bit x86 impl
    load_klass(scan_temp, receiver, noreg);
    movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
  } else {
    movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off));
  }
}


// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  const ByteSize base = Klass::vtable_start_offset();
  assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
  Address vtable_entry_addr(recv_klass,
                            vtable_index, Address::times_ptr,
                            base + vtableEntry::method_offset());
  movptr(method_result, vtable_entry_addr);
}


void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jcc, which "knows" that L_fallthrough, at least, is in
  // range of a jccb.  If this routine grows larger, reconsider at
  // least some of these.
#define local_jcc(assembler_cond, label)                        \
  if (&(label) == &L_fallthrough)  jccb(assembler_cond, label); \
  else                             jcc( assembler_cond, label) /*omit semi*/

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                  \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }      \
  else                            jmp(label)             /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface.  Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmpptr(sub_klass, super_klass);
  local_jcc(Assembler::equal, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    // Positive movl does right thing on LP64.
    movl(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
  cmpptr(super_klass, super_check_addr); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    local_jcc(Assembler::equal, *L_success);
    cmpl(super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_slow_path);
    } else {
      local_jcc(Assembler::notEqual, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_success);
    } else {
      local_jcc(Assembler::notEqual, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_success);
    } else {
      local_jcc(Assembler::notEqual, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef local_jcc
#undef final_jmp
}


void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
  assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)

  // Get super_klass value into rax (even if it was in rdi or rcx).
  bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
  if (super_klass != rax) {
    if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
    mov(rax, super_klass);
  }
  if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
  if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }

#ifndef PRODUCT
  uint* pst_counter = &SharedRuntime::_partial_subtype_ctr;
  ExternalAddress pst_counter_addr((address) pst_counter);
  NOT_LP64(  incrementl(pst_counter_addr) );
  LP64_ONLY( lea(rcx, pst_counter_addr) );
  LP64_ONLY( incrementl(Address(rcx, 0)) );
#endif //PRODUCT

  // We will consult the secondary-super array.
  movptr(rdi, secondary_supers_addr);
  // Load the array length.  (Positive movl does right thing on LP64.)
  movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
  // Skip to start of data.
  addptr(rdi, Array<Klass*>::base_offset_in_bytes());

  // Scan RCX words at [RDI] for an occurrence of RAX.
  // Set NZ/Z based on last compare.
  // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does
  // not change flags (only scas instruction which is repeated sets flags).
  // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.

  testptr(rax, rax); // Set Z = 0
  repne_scan();

  // Unspill the temp. registers:
  if (pushed_rdi)  pop(rdi);
  if (pushed_rcx)  pop(rcx);
  if (pushed_rax)  pop(rax);

  if (set_cond_codes) {
    // Special hack for the AD files:  rdi is guaranteed non-zero.
    assert(!pushed_rdi, "rdi must be left non-null");
    // Also, the condition codes are properly set Z/NZ on succeed/failure.
  }

  if (L_failure == &L_fallthrough)
    jccb(Assembler::notEqual, *L_failure);
  else
    jcc(Assembler::notEqual, *L_failure);

  // Success.  Cache the super we found and proceed in triumph.
  movptr(super_cache_addr, super_klass);

  if (L_success != &L_fallthrough) {
    jmp(*L_success);
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}

#ifdef _LP64

// population_count variant for running without the POPCNT
// instruction, which was introduced with SSE4.2 in 2008.
void MacroAssembler::population_count(Register dst, Register src,
                                      Register scratch1, Register scratch2) {
  assert_different_registers(src, scratch1, scratch2);
  if (UsePopCountInstruction) {
    Assembler::popcntq(dst, src);
  } else {
    assert_different_registers(src, scratch1, scratch2);
    assert_different_registers(dst, scratch1, scratch2);
    Label loop, done;

    mov(scratch1, src);
    // dst = 0;
    // while(scratch1 != 0) {
    //   dst++;
    //   scratch1 &= (scratch1 - 1);
    // }
    xorl(dst, dst);
    testq(scratch1, scratch1);
    jccb(Assembler::equal, done);
    {
      bind(loop);
      incq(dst);
      movq(scratch2, scratch1);
      decq(scratch2);
      andq(scratch1, scratch2);
      jccb(Assembler::notEqual, loop);
    }
    bind(done);
  }
}
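// Worked example of the fallback loop above (Kernighan's method): each
// iteration clears the lowest set bit of scratch1, so the loop runs exactly
// once per set bit.
//
//   scratch1 = 0b1011'0000                               dst = 0
//   iter 1:    0b1011'0000 & 0b1010'1111 = 0b1010'0000,  dst = 1
//   iter 2:    0b1010'0000 & 0b1001'1111 = 0b1000'0000,  dst = 2
//   iter 3:    0b1000'0000 & 0b0111'1111 = 0,            dst = 3  -> done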
// Ensure that the inline code and the stub are using the same registers.
#define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS                      \
do {                                                                 \
  assert(r_super_klass  == rax, "mismatch");                         \
  assert(r_array_base   == rbx, "mismatch");                         \
  assert(r_array_length == rcx, "mismatch");                         \
  assert(r_array_index  == rdx, "mismatch");                         \
  assert(r_sub_klass    == rsi || r_sub_klass == noreg, "mismatch"); \
  assert(r_bitmap       == r11 || r_bitmap    == noreg, "mismatch"); \
  assert(result         == rdi || result      == noreg, "mismatch"); \
} while(0)

void MacroAssembler::lookup_secondary_supers_table(Register r_sub_klass,
                                                   Register r_super_klass,
                                                   Register temp1,
                                                   Register temp2,
                                                   Register temp3,
                                                   Register temp4,
                                                   Register result,
                                                   u1 super_klass_slot) {
  assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);

  Label L_fallthrough, L_success, L_failure;

  BLOCK_COMMENT("lookup_secondary_supers_table {");

  const Register
    r_array_index  = temp1,
    r_array_length = temp2,
    r_array_base   = temp3,
    r_bitmap       = temp4;

  LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;

  xorq(result, result); // = 0

  movq(r_bitmap, Address(r_sub_klass, Klass::bitmap_offset()));
  movq(r_array_index, r_bitmap);

  // First check the bitmap to see if super_klass might be present. If
  // the bit is zero, we are certain that super_klass is not one of
  // the secondary supers.
  u1 bit = super_klass_slot;
  {
    // NB: If the count in an x86 shift instruction is 0, the flags are
    // not affected, so we do a testq instead.
    int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit;
    if (shift_count != 0) {
      salq(r_array_index, shift_count);
    } else {
      testq(r_array_index, r_array_index);
    }
  }
  // We test the MSB of r_array_index, i.e. its sign bit
  jcc(Assembler::positive, L_failure);

  // Get the first array index that can contain super_klass into r_array_index.
  if (bit != 0) {
    population_count(r_array_index, r_array_index, temp2, temp3);
  } else {
    movl(r_array_index, 1);
  }
  // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.

  // We will consult the secondary-super array.
  movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));

  // We're asserting that the first word in an Array<Klass*> is the
  // length, and the second word is the first word of the data. If
  // that ever changes, r_array_base will have to be adjusted here.
  assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
  assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");

  cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8));
  jccb(Assembler::equal, L_success);

  // Is there another entry to check? Consult the bitmap.
  btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK);
  jccb(Assembler::carryClear, L_failure);

  // Linear probe. Rotate the bitmap so that the next bit to test is
  // in Bit 1.
  if (bit != 0) {
    rorq(r_bitmap, bit);
  }

  // Calls into the stub generated by lookup_secondary_supers_table_slow_path.
  // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap.
  // Kills: r_array_length.
  // Returns: result.
  call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()));
  // Result (0/1) is in rdi
  jmpb(L_fallthrough);

  bind(L_failure);
  incq(result); // 0 => 1

  bind(L_success);
  // result = 0;

  bind(L_fallthrough);
  BLOCK_COMMENT("} lookup_secondary_supers_table");

  if (VerifySecondarySupers) {
    verify_secondary_supers_table(r_sub_klass, r_super_klass, result,
                                  temp1, temp2, temp3);
  }
}
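// A sketch of the index math above, assuming the usual 64-slot table (so
// SECONDARY_SUPERS_TABLE_MASK == 63) and the invariant that bit i of the
// bitmap is set iff hash slot i is occupied. For a super_klass hashed to
// slot 'bit':
//
//   r_array_index = popcount(bitmap << (63 - bit))
//
// i.e. the number of occupied slots at positions <= bit, which is the entry's
// rank in the packed secondary_supers array. The shift also parks bit 'bit'
// in the sign position, so the jcc(positive, L_failure) above rejects absent
// entries with no extra test. The resulting rank counts the entry itself
// (off by one), which is compensated by leaving r_array_base one word short
// of the array data, as the NB comments note.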
4923 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass, 4924 Register r_array_base, 4925 Register r_array_index, 4926 Register r_bitmap, 4927 Register temp1, 4928 Register temp2, 4929 Label* L_success, 4930 Label* L_failure) { 4931 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2); 4932 4933 const Register 4934 r_array_length = temp1, 4935 r_sub_klass = noreg, 4936 result = noreg; 4937 4938 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 4939 4940 Label L_fallthrough; 4941 int label_nulls = 0; 4942 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4943 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4944 assert(label_nulls <= 1, "at most one null in the batch"); 4945 4946 // Load the array length. 4947 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 4948 // And adjust the array base to point to the data. 4949 // NB! Effectively increments current slot index by 1. 4950 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, ""); 4951 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes()); 4952 4953 // Linear probe 4954 Label L_huge; 4955 4956 // The bitmap is full to bursting. 4957 // Implicit invariant: BITMAP_FULL implies (length > 0) 4958 cmpl(r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2); 4959 jcc(Assembler::greater, L_huge); 4960 4961 // NB! Our caller has checked bits 0 and 1 in the bitmap. The 4962 // current slot (at secondary_supers[r_array_index]) has not yet 4963 // been inspected, and r_array_index may be out of bounds if we 4964 // wrapped around the end of the array. 4965 4966 { // This is conventional linear probing, but instead of terminating 4967 // when a null entry is found in the table, we maintain a bitmap 4968 // in which a 0 indicates missing entries. 4969 // The check above guarantees there are 0s in the bitmap, so the loop 4970 // eventually terminates. 4971 4972 xorl(temp2, temp2); // = 0; 4973 4974 Label L_again; 4975 bind(L_again); 4976 4977 // Check for array wraparound. 4978 cmpl(r_array_index, r_array_length); 4979 cmovl(Assembler::greaterEqual, r_array_index, temp2); 4980 4981 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 4982 jcc(Assembler::equal, *L_success); 4983 4984 // If the next bit in bitmap is zero, we're done. 4985 btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now 4986 jcc(Assembler::carryClear, *L_failure); 4987 4988 rorq(r_bitmap, 1); // Bits 1/2 => 0/1 4989 addl(r_array_index, 1); 4990 4991 jmp(L_again); 4992 } 4993 4994 { // Degenerate case: more than 64 secondary supers. 4995 // FIXME: We could do something smarter here, maybe a vectorized 4996 // comparison or a binary search, but is that worth any added 4997 // complexity? 4998 bind(L_huge); 4999 xorl(r_array_index, r_array_index); // = 0 5000 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, 5001 L_success, 5002 (&L_fallthrough != L_failure ? 
L_failure : nullptr)); 5003 5004 bind(L_fallthrough); 5005 } 5006 } 5007 5008 struct VerifyHelperArguments { 5009 Klass* _super; 5010 Klass* _sub; 5011 intptr_t _linear_result; 5012 intptr_t _table_result; 5013 }; 5014 5015 static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) { 5016 Klass::on_secondary_supers_verification_failure(args->_super, 5017 args->_sub, 5018 args->_linear_result, 5019 args->_table_result, 5020 msg); 5021 } 5022 5023 // Make sure that the hashed lookup and a linear scan agree. 5024 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass, 5025 Register r_super_klass, 5026 Register result, 5027 Register temp1, 5028 Register temp2, 5029 Register temp3) { 5030 const Register 5031 r_array_index = temp1, 5032 r_array_length = temp2, 5033 r_array_base = temp3, 5034 r_bitmap = noreg; 5035 5036 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 5037 5038 BLOCK_COMMENT("verify_secondary_supers_table {"); 5039 5040 Label L_success, L_failure, L_check, L_done; 5041 5042 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 5043 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 5044 // And adjust the array base to point to the data. 5045 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes()); 5046 5047 testl(r_array_length, r_array_length); // array_length == 0? 5048 jcc(Assembler::zero, L_failure); 5049 5050 movl(r_array_index, 0); 5051 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success); 5052 // fall through to L_failure 5053 5054 const Register linear_result = r_array_index; // reuse temp1 5055 5056 bind(L_failure); // not present 5057 movl(linear_result, 1); 5058 jmp(L_check); 5059 5060 bind(L_success); // present 5061 movl(linear_result, 0); 5062 5063 bind(L_check); 5064 cmpl(linear_result, result); 5065 jcc(Assembler::equal, L_done); 5066 5067 { // To avoid calling convention issues, build a record on the stack 5068 // and pass the pointer to that instead. 
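// The pushes below build the record in memory order from rsp upward:
// r_super_klass is pushed last, so it lands at the lowest address, giving
// { _super, _sub, _linear_result, _table_result } exactly as
// VerifyHelperArguments expects (the x86 stack grows toward lower addresses).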
5069 push(result); 5070 push(linear_result); 5071 push(r_sub_klass); 5072 push(r_super_klass); 5073 movptr(c_rarg1, rsp); 5074 movptr(c_rarg0, (uintptr_t) "mismatch"); 5075 call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper))); 5076 should_not_reach_here(); 5077 } 5078 bind(L_done); 5079 5080 BLOCK_COMMENT("} verify_secondary_supers_table"); 5081 } 5082 5083 #undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS 5084 5085 #endif // LP64 5086 5087 void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) { 5088 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 5089 5090 Label L_fallthrough; 5091 if (L_fast_path == nullptr) { 5092 L_fast_path = &L_fallthrough; 5093 } else if (L_slow_path == nullptr) { 5094 L_slow_path = &L_fallthrough; 5095 } 5096 5097 // Fast path check: class is fully initialized 5098 cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); 5099 jcc(Assembler::equal, *L_fast_path); 5100 5101 // Fast path check: current thread is initializer thread 5102 cmpptr(thread, Address(klass, InstanceKlass::init_thread_offset())); 5103 if (L_slow_path == &L_fallthrough) { 5104 jcc(Assembler::equal, *L_fast_path); 5105 bind(*L_slow_path); 5106 } else if (L_fast_path == &L_fallthrough) { 5107 jcc(Assembler::notEqual, *L_slow_path); 5108 bind(*L_fast_path); 5109 } else { 5110 Unimplemented(); 5111 } 5112 } 5113 5114 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) { 5115 if (VM_Version::supports_cmov()) { 5116 cmovl(cc, dst, src); 5117 } else { 5118 Label L; 5119 jccb(negate_condition(cc), L); 5120 movl(dst, src); 5121 bind(L); 5122 } 5123 } 5124 5125 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) { 5126 if (VM_Version::supports_cmov()) { 5127 cmovl(cc, dst, src); 5128 } else { 5129 Label L; 5130 jccb(negate_condition(cc), L); 5131 movl(dst, src); 5132 bind(L); 5133 } 5134 } 5135 5136 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 5137 if (!VerifyOops) return; 5138 5139 BLOCK_COMMENT("verify_oop {"); 5140 #ifdef _LP64 5141 push(rscratch1); 5142 #endif 5143 push(rax); // save rax 5144 push(reg); // pass register argument 5145 5146 // Pass register number to verify_oop_subroutine 5147 const char* b = nullptr; 5148 { 5149 ResourceMark rm; 5150 stringStream ss; 5151 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 5152 b = code_string(ss.as_string()); 5153 } 5154 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate()); 5155 pushptr(buffer.addr(), rscratch1); 5156 5157 // call indirectly to solve generation ordering problem 5158 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 5159 call(rax); 5160 // Caller pops the arguments (oop, message) and restores rax, r10 5161 BLOCK_COMMENT("} verify_oop"); 5162 } 5163 5164 void MacroAssembler::vallones(XMMRegister dst, int vector_len) { 5165 if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) { 5166 // Only pcmpeq has dependency breaking treatment (i.e the execution can begin without 5167 // waiting for the previous result on dst), not vpcmpeqd, so just use vpternlog 5168 vpternlogd(dst, 0xFF, dst, dst, vector_len); 5169 } else if (VM_Version::supports_avx()) { 5170 vpcmpeqd(dst, dst, dst, vector_len); 5171 } else { 5172 assert(VM_Version::supports_sse2(), ""); 5173 pcmpeqd(dst, dst); 5174 } 5175 } 5176 
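// Note on vallones() above: the 0xFF immediate selects the constant-true
// ternary boolean function, so vpternlogd writes all-ones regardless of the
// three source values; the pcmpeqd/vpcmpeqd forms instead get all-ones from
// x == x holding in every lane.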
5177 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 5178 int extra_slot_offset) { 5179 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 5180 int stackElementSize = Interpreter::stackElementSize; 5181 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 5182 #ifdef ASSERT 5183 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 5184 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 5185 #endif 5186 Register scale_reg = noreg; 5187 Address::ScaleFactor scale_factor = Address::no_scale; 5188 if (arg_slot.is_constant()) { 5189 offset += arg_slot.as_constant() * stackElementSize; 5190 } else { 5191 scale_reg = arg_slot.as_register(); 5192 scale_factor = Address::times(stackElementSize); 5193 } 5194 offset += wordSize; // return PC is on stack 5195 return Address(rsp, scale_reg, scale_factor, offset); 5196 } 5197 5198 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 5199 if (!VerifyOops) return; 5200 5201 #ifdef _LP64 5202 push(rscratch1); 5203 #endif 5204 push(rax); // save rax, 5205 // addr may contain rsp so we will have to adjust it based on the push 5206 // we just did (and on 64 bit we do two pushes) 5207 // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which 5208 // stores rax into addr which is backwards of what was intended. 5209 if (addr.uses(rsp)) { 5210 lea(rax, addr); 5211 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord)); 5212 } else { 5213 pushptr(addr); 5214 } 5215 5216 // Pass register number to verify_oop_subroutine 5217 const char* b = nullptr; 5218 { 5219 ResourceMark rm; 5220 stringStream ss; 5221 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 5222 b = code_string(ss.as_string()); 5223 } 5224 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate()); 5225 pushptr(buffer.addr(), rscratch1); 5226 5227 // call indirectly to solve generation ordering problem 5228 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 5229 call(rax); 5230 // Caller pops the arguments (addr, message) and restores rax, r10. 
5231 } 5232 5233 void MacroAssembler::verify_tlab() { 5234 #ifdef ASSERT 5235 if (UseTLAB && VerifyOops) { 5236 Label next, ok; 5237 Register t1 = rsi; 5238 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread); 5239 5240 push(t1); 5241 NOT_LP64(push(thread_reg)); 5242 NOT_LP64(get_thread(thread_reg)); 5243 5244 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 5245 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); 5246 jcc(Assembler::aboveEqual, next); 5247 STOP("assert(top >= start)"); 5248 should_not_reach_here(); 5249 5250 bind(next); 5251 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); 5252 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 5253 jcc(Assembler::aboveEqual, ok); 5254 STOP("assert(top <= end)"); 5255 should_not_reach_here(); 5256 5257 bind(ok); 5258 NOT_LP64(pop(thread_reg)); 5259 pop(t1); 5260 } 5261 #endif 5262 } 5263 5264 class ControlWord { 5265 public: 5266 int32_t _value; 5267 5268 int rounding_control() const { return (_value >> 10) & 3 ; } 5269 int precision_control() const { return (_value >> 8) & 3 ; } 5270 bool precision() const { return ((_value >> 5) & 1) != 0; } 5271 bool underflow() const { return ((_value >> 4) & 1) != 0; } 5272 bool overflow() const { return ((_value >> 3) & 1) != 0; } 5273 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 5274 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 5275 bool invalid() const { return ((_value >> 0) & 1) != 0; } 5276 5277 void print() const { 5278 // rounding control 5279 const char* rc; 5280 switch (rounding_control()) { 5281 case 0: rc = "round near"; break; 5282 case 1: rc = "round down"; break; 5283 case 2: rc = "round up "; break; 5284 case 3: rc = "chop "; break; 5285 default: 5286 rc = nullptr; // silence compiler warnings 5287 fatal("Unknown rounding control: %d", rounding_control()); 5288 }; 5289 // precision control 5290 const char* pc; 5291 switch (precision_control()) { 5292 case 0: pc = "24 bits "; break; 5293 case 1: pc = "reserved"; break; 5294 case 2: pc = "53 bits "; break; 5295 case 3: pc = "64 bits "; break; 5296 default: 5297 pc = nullptr; // silence compiler warnings 5298 fatal("Unknown precision control: %d", precision_control()); 5299 }; 5300 // flags 5301 char f[9]; 5302 f[0] = ' '; 5303 f[1] = ' '; 5304 f[2] = (precision ()) ? 'P' : 'p'; 5305 f[3] = (underflow ()) ? 'U' : 'u'; 5306 f[4] = (overflow ()) ? 'O' : 'o'; 5307 f[5] = (zero_divide ()) ? 'Z' : 'z'; 5308 f[6] = (denormalized()) ? 'D' : 'd'; 5309 f[7] = (invalid ()) ? 
'I' : 'i'; 5310 f[8] = '\x0'; 5311 // output 5312 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc); 5313 } 5314 5315 }; 5316 5317 class StatusWord { 5318 public: 5319 int32_t _value; 5320 5321 bool busy() const { return ((_value >> 15) & 1) != 0; } 5322 bool C3() const { return ((_value >> 14) & 1) != 0; } 5323 bool C2() const { return ((_value >> 10) & 1) != 0; } 5324 bool C1() const { return ((_value >> 9) & 1) != 0; } 5325 bool C0() const { return ((_value >> 8) & 1) != 0; } 5326 int top() const { return (_value >> 11) & 7 ; } 5327 bool error_status() const { return ((_value >> 7) & 1) != 0; } 5328 bool stack_fault() const { return ((_value >> 6) & 1) != 0; } 5329 bool precision() const { return ((_value >> 5) & 1) != 0; } 5330 bool underflow() const { return ((_value >> 4) & 1) != 0; } 5331 bool overflow() const { return ((_value >> 3) & 1) != 0; } 5332 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 5333 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 5334 bool invalid() const { return ((_value >> 0) & 1) != 0; } 5335 5336 void print() const { 5337 // condition codes 5338 char c[5]; 5339 c[0] = (C3()) ? '3' : '-'; 5340 c[1] = (C2()) ? '2' : '-'; 5341 c[2] = (C1()) ? '1' : '-'; 5342 c[3] = (C0()) ? '0' : '-'; 5343 c[4] = '\x0'; 5344 // flags 5345 char f[9]; 5346 f[0] = (error_status()) ? 'E' : '-'; 5347 f[1] = (stack_fault ()) ? 'S' : '-'; 5348 f[2] = (precision ()) ? 'P' : '-'; 5349 f[3] = (underflow ()) ? 'U' : '-'; 5350 f[4] = (overflow ()) ? 'O' : '-'; 5351 f[5] = (zero_divide ()) ? 'Z' : '-'; 5352 f[6] = (denormalized()) ? 'D' : '-'; 5353 f[7] = (invalid ()) ? 'I' : '-'; 5354 f[8] = '\x0'; 5355 // output 5356 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top()); 5357 } 5358 5359 }; 5360 5361 class TagWord { 5362 public: 5363 int32_t _value; 5364 5365 int tag_at(int i) const { return (_value >> (i*2)) & 3; } 5366 5367 void print() const { 5368 printf("%04x", _value & 0xFFFF); 5369 } 5370 5371 }; 5372 5373 class FPU_Register { 5374 public: 5375 int32_t _m0; 5376 int32_t _m1; 5377 int16_t _ex; 5378 5379 bool is_indefinite() const { 5380 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0; 5381 } 5382 5383 void print() const { 5384 char sign = (_ex < 0) ? '-' : '+'; 5385 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " "; 5386 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind); 5387 }; 5388 5389 }; 5390 5391 class FPU_State { 5392 public: 5393 enum { 5394 register_size = 10, 5395 number_of_registers = 8, 5396 register_mask = 7 5397 }; 5398 5399 ControlWord _control_word; 5400 StatusWord _status_word; 5401 TagWord _tag_word; 5402 int32_t _error_offset; 5403 int32_t _error_selector; 5404 int32_t _data_offset; 5405 int32_t _data_selector; 5406 int8_t _register[register_size * number_of_registers]; 5407 5408 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); } 5409 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; } 5410 5411 const char* tag_as_string(int tag) const { 5412 switch (tag) { 5413 case 0: return "valid"; 5414 case 1: return "zero"; 5415 case 2: return "special"; 5416 case 3: return "empty"; 5417 } 5418 ShouldNotReachHere(); 5419 return nullptr; 5420 } 5421 5422 void print() const { 5423 // print computation registers 5424 { int t = _status_word.top(); 5425 for (int i = 0; i < number_of_registers; i++) { 5426 int j = (i - t) & register_mask; 5427 printf("%c r%d = ST%d = ", (j == 0 ? 
'*' : ' '), i, j); 5428 st(j)->print(); 5429 printf(" %s\n", tag_as_string(_tag_word.tag_at(i))); 5430 } 5431 } 5432 printf("\n"); 5433 // print control registers 5434 printf("ctrl = "); _control_word.print(); printf("\n"); 5435 printf("stat = "); _status_word .print(); printf("\n"); 5436 printf("tags = "); _tag_word .print(); printf("\n"); 5437 } 5438 5439 }; 5440 5441 class Flag_Register { 5442 public: 5443 int32_t _value; 5444 5445 bool overflow() const { return ((_value >> 11) & 1) != 0; } 5446 bool direction() const { return ((_value >> 10) & 1) != 0; } 5447 bool sign() const { return ((_value >> 7) & 1) != 0; } 5448 bool zero() const { return ((_value >> 6) & 1) != 0; } 5449 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; } 5450 bool parity() const { return ((_value >> 2) & 1) != 0; } 5451 bool carry() const { return ((_value >> 0) & 1) != 0; } 5452 5453 void print() const { 5454 // flags 5455 char f[8]; 5456 f[0] = (overflow ()) ? 'O' : '-'; 5457 f[1] = (direction ()) ? 'D' : '-'; 5458 f[2] = (sign ()) ? 'S' : '-'; 5459 f[3] = (zero ()) ? 'Z' : '-'; 5460 f[4] = (auxiliary_carry()) ? 'A' : '-'; 5461 f[5] = (parity ()) ? 'P' : '-'; 5462 f[6] = (carry ()) ? 'C' : '-'; 5463 f[7] = '\x0'; 5464 // output 5465 printf("%08x flags = %s", _value, f); 5466 } 5467 5468 }; 5469 5470 class IU_Register { 5471 public: 5472 int32_t _value; 5473 5474 void print() const { 5475 printf("%08x %11d", _value, _value); 5476 } 5477 5478 }; 5479 5480 class IU_State { 5481 public: 5482 Flag_Register _eflags; 5483 IU_Register _rdi; 5484 IU_Register _rsi; 5485 IU_Register _rbp; 5486 IU_Register _rsp; 5487 IU_Register _rbx; 5488 IU_Register _rdx; 5489 IU_Register _rcx; 5490 IU_Register _rax; 5491 5492 void print() const { 5493 // computation registers 5494 printf("rax, = "); _rax.print(); printf("\n"); 5495 printf("rbx, = "); _rbx.print(); printf("\n"); 5496 printf("rcx = "); _rcx.print(); printf("\n"); 5497 printf("rdx = "); _rdx.print(); printf("\n"); 5498 printf("rdi = "); _rdi.print(); printf("\n"); 5499 printf("rsi = "); _rsi.print(); printf("\n"); 5500 printf("rbp, = "); _rbp.print(); printf("\n"); 5501 printf("rsp = "); _rsp.print(); printf("\n"); 5502 printf("\n"); 5503 // control registers 5504 printf("flgs = "); _eflags.print(); printf("\n"); 5505 } 5506 }; 5507 5508 5509 class CPU_State { 5510 public: 5511 FPU_State _fpu_state; 5512 IU_State _iu_state; 5513 5514 void print() const { 5515 printf("--------------------------------------------------\n"); 5516 _iu_state .print(); 5517 printf("\n"); 5518 _fpu_state.print(); 5519 printf("--------------------------------------------------\n"); 5520 } 5521 5522 }; 5523 5524 5525 static void _print_CPU_state(CPU_State* state) { 5526 state->print(); 5527 }; 5528 5529 5530 void MacroAssembler::print_CPU_state() { 5531 push_CPU_state(); 5532 push(rsp); // pass CPU state 5533 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state))); 5534 addptr(rsp, wordSize); // discard argument 5535 pop_CPU_state(); 5536 } 5537 5538 5539 #ifndef _LP64 5540 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) { 5541 static int counter = 0; 5542 FPU_State* fs = &state->_fpu_state; 5543 counter++; 5544 // For leaf calls, only verify that the top few elements remain empty. 5545 // We only need 1 empty at the top for C2 code. 
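// Depth contract used below: a non-negative stack_depth demands exactly
// that many live elements, while a negative stack_depth only bounds the
// depth from above by -stack_depth.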
5546 if( stack_depth < 0 ) { 5547 if( fs->tag_for_st(7) != 3 ) { 5548 printf("FPR7 not empty\n"); 5549 state->print(); 5550 assert(false, "error"); 5551 return false; 5552 } 5553 return true; // All other stack states do not matter 5554 } 5555 5556 assert((fs->_control_word._value & 0xffff) == StubRoutines::x86::fpu_cntrl_wrd_std(), 5557 "bad FPU control word"); 5558 5559 // compute stack depth 5560 int i = 0; 5561 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++; 5562 int d = i; 5563 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++; 5564 // verify findings 5565 if (i != FPU_State::number_of_registers) { 5566 // stack not contiguous 5567 printf("%s: stack not contiguous at ST%d\n", s, i); 5568 state->print(); 5569 assert(false, "error"); 5570 return false; 5571 } 5572 // check if computed stack depth corresponds to expected stack depth 5573 if (stack_depth < 0) { 5574 // expected stack depth is -stack_depth or less 5575 if (d > -stack_depth) { 5576 // too many elements on the stack 5577 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d); 5578 state->print(); 5579 assert(false, "error"); 5580 return false; 5581 } 5582 } else { 5583 // expected stack depth is stack_depth 5584 if (d != stack_depth) { 5585 // wrong stack depth 5586 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d); 5587 state->print(); 5588 assert(false, "error"); 5589 return false; 5590 } 5591 } 5592 // everything is cool 5593 return true; 5594 } 5595 5596 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 5597 if (!VerifyFPU) return; 5598 push_CPU_state(); 5599 push(rsp); // pass CPU state 5600 ExternalAddress msg((address) s); 5601 // pass message string s 5602 pushptr(msg.addr(), noreg); 5603 push(stack_depth); // pass stack depth 5604 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU))); 5605 addptr(rsp, 3 * wordSize); // discard arguments 5606 // check for error 5607 { Label L; 5608 testl(rax, rax); 5609 jcc(Assembler::notZero, L); 5610 int3(); // break if error condition 5611 bind(L); 5612 } 5613 pop_CPU_state(); 5614 } 5615 #endif // _LP64 5616 5617 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) { 5618 // Either restore the MXCSR register after returning from the JNI Call 5619 // or verify that it wasn't changed (with -Xcheck:jni flag). 5620 if (VM_Version::supports_sse()) { 5621 if (RestoreMXCSROnJNICalls) { 5622 ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch); 5623 } else if (CheckJNICalls) { 5624 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry())); 5625 } 5626 } 5627 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty. 5628 vzeroupper(); 5629 5630 #ifndef _LP64 5631 // Either restore the x87 floating pointer control word after returning 5632 // from the JNI call or verify that it wasn't changed. 5633 if (CheckJNICalls) { 5634 call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry())); 5635 } 5636 #endif // _LP64 5637 } 5638 5639 // ((OopHandle)result).resolve(); 5640 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) { 5641 assert_different_registers(result, tmp); 5642 5643 // Only 64 bit platforms support GCs that require a tmp register 5644 // Only IN_HEAP loads require a thread_tmp register 5645 // OopHandle::resolve is an indirection like jobject. 
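// In effect (ignoring the GC load barrier): result = *(oop*)result;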
5646 access_load_at(T_OBJECT, IN_NATIVE, 5647 result, Address(result, 0), tmp, /*tmp_thread*/noreg); 5648 } 5649 5650 // ((WeakHandle)result).resolve(); 5651 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) { 5652 assert_different_registers(rresult, rtmp); 5653 Label resolved; 5654 5655 // A null weak handle resolves to null. 5656 cmpptr(rresult, 0); 5657 jcc(Assembler::equal, resolved); 5658 5659 // Only 64 bit platforms support GCs that require a tmp register 5660 // Only IN_HEAP loads require a thread_tmp register 5661 // WeakHandle::resolve is an indirection like jweak. 5662 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 5663 rresult, Address(rresult, 0), rtmp, /*tmp_thread*/noreg); 5664 bind(resolved); 5665 } 5666 5667 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) { 5668 // get mirror 5669 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 5670 load_method_holder(mirror, method); 5671 movptr(mirror, Address(mirror, mirror_offset)); 5672 resolve_oop_handle(mirror, tmp); 5673 } 5674 5675 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 5676 load_method_holder(rresult, rmethod); 5677 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 5678 } 5679 5680 void MacroAssembler::load_method_holder(Register holder, Register method) { 5681 movptr(holder, Address(method, Method::const_offset())); // ConstMethod* 5682 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 5683 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 5684 } 5685 5686 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { 5687 assert_different_registers(src, tmp); 5688 assert_different_registers(dst, tmp); 5689 #ifdef _LP64 5690 if (UseCompressedClassPointers) { 5691 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5692 decode_klass_not_null(dst, tmp); 5693 } else 5694 #endif 5695 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5696 } 5697 5698 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { 5699 assert_different_registers(src, tmp); 5700 assert_different_registers(dst, tmp); 5701 #ifdef _LP64 5702 if (UseCompressedClassPointers) { 5703 encode_klass_not_null(src, tmp); 5704 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); 5705 } else 5706 #endif 5707 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src); 5708 } 5709 5710 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, 5711 Register tmp1, Register thread_tmp) { 5712 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5713 decorators = AccessInternal::decorator_fixup(decorators, type); 5714 bool as_raw = (decorators & AS_RAW) != 0; 5715 if (as_raw) { 5716 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp); 5717 } else { 5718 bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp); 5719 } 5720 } 5721 5722 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, 5723 Register tmp1, Register tmp2, Register tmp3) { 5724 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5725 decorators = AccessInternal::decorator_fixup(decorators, type); 5726 bool as_raw = (decorators & AS_RAW) != 0; 5727 if (as_raw) { 5728 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 
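// AS_RAW: the statically qualified call above bypasses any GC-specific
// override of store_at; the virtual dispatch below applies the full barrier.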
5729 } else { 5730 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5731 } 5732 } 5733 5734 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 5735 Register thread_tmp, DecoratorSet decorators) { 5736 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp); 5737 } 5738 5739 // Doesn't do verification, generates fixed size code 5740 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 5741 Register thread_tmp, DecoratorSet decorators) { 5742 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp); 5743 } 5744 5745 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5746 Register tmp2, Register tmp3, DecoratorSet decorators) { 5747 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5748 } 5749 5750 // Used for storing nulls. 5751 void MacroAssembler::store_heap_oop_null(Address dst) { 5752 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5753 } 5754 5755 #ifdef _LP64 5756 void MacroAssembler::store_klass_gap(Register dst, Register src) { 5757 if (UseCompressedClassPointers) { 5758 // Store to klass gap in destination 5759 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); 5760 } 5761 } 5762 5763 #ifdef ASSERT 5764 void MacroAssembler::verify_heapbase(const char* msg) { 5765 assert (UseCompressedOops, "should be compressed"); 5766 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5767 if (CheckCompressedOops) { 5768 Label ok; 5769 ExternalAddress src2(CompressedOops::base_addr()); 5770 const bool is_src2_reachable = reachable(src2); 5771 if (!is_src2_reachable) { 5772 push(rscratch1); // cmpptr trashes rscratch1 5773 } 5774 cmpptr(r12_heapbase, src2, rscratch1); 5775 jcc(Assembler::equal, ok); 5776 STOP(msg); 5777 bind(ok); 5778 if (!is_src2_reachable) { 5779 pop(rscratch1); 5780 } 5781 } 5782 } 5783 #endif 5784 5785 // Algorithm must match oop.inline.hpp encode_heap_oop. 
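// Sketch of the encoding, with base B = CompressedOops::base() and
// shift S = CompressedOops::shift():
//   narrow = (uint32_t)(((v == nullptr ? B : v) - B) >> S)
// so null maps to zero via the cmov in the code below.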
5786 void MacroAssembler::encode_heap_oop(Register r) { 5787 #ifdef ASSERT 5788 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 5789 #endif 5790 verify_oop_msg(r, "broken oop in encode_heap_oop"); 5791 if (CompressedOops::base() == nullptr) { 5792 if (CompressedOops::shift() != 0) { 5793 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5794 shrq(r, LogMinObjAlignmentInBytes); 5795 } 5796 return; 5797 } 5798 testq(r, r); 5799 cmovq(Assembler::equal, r, r12_heapbase); 5800 subq(r, r12_heapbase); 5801 shrq(r, LogMinObjAlignmentInBytes); 5802 } 5803 5804 void MacroAssembler::encode_heap_oop_not_null(Register r) { 5805 #ifdef ASSERT 5806 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 5807 if (CheckCompressedOops) { 5808 Label ok; 5809 testq(r, r); 5810 jcc(Assembler::notEqual, ok); 5811 STOP("null oop passed to encode_heap_oop_not_null"); 5812 bind(ok); 5813 } 5814 #endif 5815 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 5816 if (CompressedOops::base() != nullptr) { 5817 subq(r, r12_heapbase); 5818 } 5819 if (CompressedOops::shift() != 0) { 5820 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5821 shrq(r, LogMinObjAlignmentInBytes); 5822 } 5823 } 5824 5825 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 5826 #ifdef ASSERT 5827 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 5828 if (CheckCompressedOops) { 5829 Label ok; 5830 testq(src, src); 5831 jcc(Assembler::notEqual, ok); 5832 STOP("null oop passed to encode_heap_oop_not_null2"); 5833 bind(ok); 5834 } 5835 #endif 5836 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 5837 if (dst != src) { 5838 movq(dst, src); 5839 } 5840 if (CompressedOops::base() != nullptr) { 5841 subq(dst, r12_heapbase); 5842 } 5843 if (CompressedOops::shift() != 0) { 5844 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5845 shrq(dst, LogMinObjAlignmentInBytes); 5846 } 5847 } 5848 5849 void MacroAssembler::decode_heap_oop(Register r) { 5850 #ifdef ASSERT 5851 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 5852 #endif 5853 if (CompressedOops::base() == nullptr) { 5854 if (CompressedOops::shift() != 0) { 5855 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5856 shlq(r, LogMinObjAlignmentInBytes); 5857 } 5858 } else { 5859 Label done; 5860 shlq(r, LogMinObjAlignmentInBytes); 5861 jccb(Assembler::equal, done); 5862 addq(r, r12_heapbase); 5863 bind(done); 5864 } 5865 verify_oop_msg(r, "broken oop in decode_heap_oop"); 5866 } 5867 5868 void MacroAssembler::decode_heap_oop_not_null(Register r) { 5869 // Note: it will change flags 5870 assert (UseCompressedOops, "should only be used for compressed headers"); 5871 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5872 // Cannot assert, unverified entry point counts instructions (see .ad file) 5873 // vtableStubs also counts instructions in pd_code_size_limit. 5874 // Also do not verify_oop as this is called by verify_oop. 
5875 if (CompressedOops::shift() != 0) { 5876 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5877 shlq(r, LogMinObjAlignmentInBytes); 5878 if (CompressedOops::base() != nullptr) { 5879 addq(r, r12_heapbase); 5880 } 5881 } else { 5882 assert (CompressedOops::base() == nullptr, "sanity"); 5883 } 5884 } 5885 5886 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 5887 // Note: it will change flags 5888 assert (UseCompressedOops, "should only be used for compressed headers"); 5889 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5890 // Cannot assert, unverified entry point counts instructions (see .ad file) 5891 // vtableStubs also counts instructions in pd_code_size_limit. 5892 // Also do not verify_oop as this is called by verify_oop. 5893 if (CompressedOops::shift() != 0) { 5894 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5895 if (LogMinObjAlignmentInBytes == Address::times_8) { 5896 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); 5897 } else { 5898 if (dst != src) { 5899 movq(dst, src); 5900 } 5901 shlq(dst, LogMinObjAlignmentInBytes); 5902 if (CompressedOops::base() != nullptr) { 5903 addq(dst, r12_heapbase); 5904 } 5905 } 5906 } else { 5907 assert (CompressedOops::base() == nullptr, "sanity"); 5908 if (dst != src) { 5909 movq(dst, src); 5910 } 5911 } 5912 } 5913 5914 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { 5915 assert_different_registers(r, tmp); 5916 if (CompressedKlassPointers::base() != nullptr) { 5917 mov64(tmp, (int64_t)CompressedKlassPointers::base()); 5918 subq(r, tmp); 5919 } 5920 if (CompressedKlassPointers::shift() != 0) { 5921 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5922 shrq(r, LogKlassAlignmentInBytes); 5923 } 5924 } 5925 5926 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) { 5927 assert_different_registers(src, dst); 5928 if (CompressedKlassPointers::base() != nullptr) { 5929 mov64(dst, -(int64_t)CompressedKlassPointers::base()); 5930 addq(dst, src); 5931 } else { 5932 movptr(dst, src); 5933 } 5934 if (CompressedKlassPointers::shift() != 0) { 5935 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5936 shrq(dst, LogKlassAlignmentInBytes); 5937 } 5938 } 5939 5940 void MacroAssembler::decode_klass_not_null(Register r, Register tmp) { 5941 assert_different_registers(r, tmp); 5942 // Note: it will change flags 5943 assert(UseCompressedClassPointers, "should only be used for compressed headers"); 5944 // Cannot assert, unverified entry point counts instructions (see .ad file) 5945 // vtableStubs also counts instructions in pd_code_size_limit. 5946 // Also do not verify_oop as this is called by verify_oop. 
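// Decode sketch: klass = (Klass*)(((uint64_t)narrow << shift) + base),
// where either the shift or the base may be absent (checked below).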
5947 if (CompressedKlassPointers::shift() != 0) { 5948 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5949 shlq(r, LogKlassAlignmentInBytes); 5950 } 5951 if (CompressedKlassPointers::base() != nullptr) { 5952 mov64(tmp, (int64_t)CompressedKlassPointers::base()); 5953 addq(r, tmp); 5954 } 5955 } 5956 5957 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) { 5958 assert_different_registers(src, dst); 5959 // Note: it will change flags 5960 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5961 // Cannot assert, unverified entry point counts instructions (see .ad file) 5962 // vtableStubs also counts instructions in pd_code_size_limit. 5963 // Also do not verify_oop as this is called by verify_oop. 5964 5965 if (CompressedKlassPointers::base() == nullptr && 5966 CompressedKlassPointers::shift() == 0) { 5967 // The best case scenario is that there is no base or shift. Then it is already 5968 // a pointer that needs nothing but a register rename. 5969 movl(dst, src); 5970 } else { 5971 if (CompressedKlassPointers::base() != nullptr) { 5972 mov64(dst, (int64_t)CompressedKlassPointers::base()); 5973 } else { 5974 xorq(dst, dst); 5975 } 5976 if (CompressedKlassPointers::shift() != 0) { 5977 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5978 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); 5979 leaq(dst, Address(dst, src, Address::times_8, 0)); 5980 } else { 5981 addq(dst, src); 5982 } 5983 } 5984 } 5985 5986 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5987 assert (UseCompressedOops, "should only be used for compressed headers"); 5988 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5989 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5990 int oop_index = oop_recorder()->find_index(obj); 5991 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5992 mov_narrow_oop(dst, oop_index, rspec); 5993 } 5994 5995 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) { 5996 assert (UseCompressedOops, "should only be used for compressed headers"); 5997 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5998 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5999 int oop_index = oop_recorder()->find_index(obj); 6000 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6001 mov_narrow_oop(dst, oop_index, rspec); 6002 } 6003 6004 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 6005 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6006 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6007 int klass_index = oop_recorder()->find_index(k); 6008 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6009 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6010 } 6011 6012 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) { 6013 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6014 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6015 int klass_index = oop_recorder()->find_index(k); 6016 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6017 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6018 } 6019 6020 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) { 6021 assert (UseCompressedOops, 
"should only be used for compressed headers"); 6022 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6023 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6024 int oop_index = oop_recorder()->find_index(obj); 6025 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6026 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 6027 } 6028 6029 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) { 6030 assert (UseCompressedOops, "should only be used for compressed headers"); 6031 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6032 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6033 int oop_index = oop_recorder()->find_index(obj); 6034 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6035 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 6036 } 6037 6038 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) { 6039 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6040 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6041 int klass_index = oop_recorder()->find_index(k); 6042 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6043 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6044 } 6045 6046 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { 6047 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6048 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6049 int klass_index = oop_recorder()->find_index(k); 6050 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6051 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6052 } 6053 6054 void MacroAssembler::reinit_heapbase() { 6055 if (UseCompressedOops) { 6056 if (Universe::heap() != nullptr) { 6057 if (CompressedOops::base() == nullptr) { 6058 MacroAssembler::xorptr(r12_heapbase, r12_heapbase); 6059 } else { 6060 mov64(r12_heapbase, (int64_t)CompressedOops::base()); 6061 } 6062 } else { 6063 movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr())); 6064 } 6065 } 6066 } 6067 6068 #endif // _LP64 6069 6070 #if COMPILER2_OR_JVMCI 6071 6072 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers 6073 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { 6074 // cnt - number of qwords (8-byte words). 6075 // base - start address, qword aligned. 
6076 Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end; 6077 bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0); 6078 if (use64byteVector) { 6079 vpxor(xtmp, xtmp, xtmp, AVX_512bit); 6080 } else if (MaxVectorSize >= 32) { 6081 vpxor(xtmp, xtmp, xtmp, AVX_256bit); 6082 } else { 6083 pxor(xtmp, xtmp); 6084 } 6085 jmp(L_zero_64_bytes); 6086 6087 BIND(L_loop); 6088 if (MaxVectorSize >= 32) { 6089 fill64(base, 0, xtmp, use64byteVector); 6090 } else { 6091 movdqu(Address(base, 0), xtmp); 6092 movdqu(Address(base, 16), xtmp); 6093 movdqu(Address(base, 32), xtmp); 6094 movdqu(Address(base, 48), xtmp); 6095 } 6096 addptr(base, 64); 6097 6098 BIND(L_zero_64_bytes); 6099 subptr(cnt, 8); 6100 jccb(Assembler::greaterEqual, L_loop); 6101 6102 // Copy trailing 64 bytes 6103 if (use64byteVector) { 6104 addptr(cnt, 8); 6105 jccb(Assembler::equal, L_end); 6106 fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true); 6107 jmp(L_end); 6108 } else { 6109 addptr(cnt, 4); 6110 jccb(Assembler::less, L_tail); 6111 if (MaxVectorSize >= 32) { 6112 vmovdqu(Address(base, 0), xtmp); 6113 } else { 6114 movdqu(Address(base, 0), xtmp); 6115 movdqu(Address(base, 16), xtmp); 6116 } 6117 } 6118 addptr(base, 32); 6119 subptr(cnt, 4); 6120 6121 BIND(L_tail); 6122 addptr(cnt, 4); 6123 jccb(Assembler::lessEqual, L_end); 6124 if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) { 6125 fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp); 6126 } else { 6127 decrement(cnt); 6128 6129 BIND(L_sloop); 6130 movq(Address(base, 0), xtmp); 6131 addptr(base, 8); 6132 decrement(cnt); 6133 jccb(Assembler::greaterEqual, L_sloop); 6134 } 6135 BIND(L_end); 6136 } 6137 6138 // Clearing constant sized memory using YMM/ZMM registers. 6139 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { 6140 assert(UseAVX > 2 && VM_Version::supports_avx512vl(), ""); 6141 bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0); 6142 6143 int vector64_count = (cnt & (~0x7)) >> 3; 6144 cnt = cnt & 0x7; 6145 const int fill64_per_loop = 4; 6146 const int max_unrolled_fill64 = 8; 6147 6148 // 64 byte initialization loop. 6149 vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit); 6150 int start64 = 0; 6151 if (vector64_count > max_unrolled_fill64) { 6152 Label LOOP; 6153 Register index = rtmp; 6154 6155 start64 = vector64_count - (vector64_count % fill64_per_loop); 6156 6157 movl(index, 0); 6158 BIND(LOOP); 6159 for (int i = 0; i < fill64_per_loop; i++) { 6160 fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector); 6161 } 6162 addl(index, fill64_per_loop * 64); 6163 cmpl(index, start64 * 64); 6164 jccb(Assembler::less, LOOP); 6165 } 6166 for (int i = start64; i < vector64_count; i++) { 6167 fill64(base, i * 64, xtmp, use64byteVector); 6168 } 6169 6170 // Clear remaining 64 byte tail. 
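// For the masked cases below, the immediate loaded into the k-register
// has one set bit per qword still to clear (e.g. 0x7F covers 7 qwords).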
6171 int disp = vector64_count * 64; 6172 if (cnt) { 6173 switch (cnt) { 6174 case 1: 6175 movq(Address(base, disp), xtmp); 6176 break; 6177 case 2: 6178 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit); 6179 break; 6180 case 3: 6181 movl(rtmp, 0x7); 6182 kmovwl(mask, rtmp); 6183 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit); 6184 break; 6185 case 4: 6186 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 6187 break; 6188 case 5: 6189 if (use64byteVector) { 6190 movl(rtmp, 0x1F); 6191 kmovwl(mask, rtmp); 6192 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 6193 } else { 6194 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 6195 movq(Address(base, disp + 32), xtmp); 6196 } 6197 break; 6198 case 6: 6199 if (use64byteVector) { 6200 movl(rtmp, 0x3F); 6201 kmovwl(mask, rtmp); 6202 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 6203 } else { 6204 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 6205 evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit); 6206 } 6207 break; 6208 case 7: 6209 if (use64byteVector) { 6210 movl(rtmp, 0x7F); 6211 kmovwl(mask, rtmp); 6212 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 6213 } else { 6214 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 6215 movl(rtmp, 0x7); 6216 kmovwl(mask, rtmp); 6217 evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit); 6218 } 6219 break; 6220 default: 6221 fatal("Unexpected length : %d\n",cnt); 6222 break; 6223 } 6224 } 6225 } 6226 6227 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp, 6228 bool is_large, KRegister mask) { 6229 // cnt - number of qwords (8-byte words). 6230 // base - start address, qword aligned. 
6231 // is_large - if optimizers know cnt is larger than InitArrayShortSize 6232 assert(base==rdi, "base register must be edi for rep stos"); 6233 assert(tmp==rax, "tmp register must be eax for rep stos"); 6234 assert(cnt==rcx, "cnt register must be ecx for rep stos"); 6235 assert(InitArrayShortSize % BytesPerLong == 0, 6236 "InitArrayShortSize should be the multiple of BytesPerLong"); 6237 6238 Label DONE; 6239 if (!is_large || !UseXMMForObjInit) { 6240 xorptr(tmp, tmp); 6241 } 6242 6243 if (!is_large) { 6244 Label LOOP, LONG; 6245 cmpptr(cnt, InitArrayShortSize/BytesPerLong); 6246 jccb(Assembler::greater, LONG); 6247 6248 NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM 6249 6250 decrement(cnt); 6251 jccb(Assembler::negative, DONE); // Zero length 6252 6253 // Use individual pointer-sized stores for small counts: 6254 BIND(LOOP); 6255 movptr(Address(base, cnt, Address::times_ptr), tmp); 6256 decrement(cnt); 6257 jccb(Assembler::greaterEqual, LOOP); 6258 jmpb(DONE); 6259 6260 BIND(LONG); 6261 } 6262 6263 // Use longer rep-prefixed ops for non-small counts: 6264 if (UseFastStosb) { 6265 shlptr(cnt, 3); // convert to number of bytes 6266 rep_stosb(); 6267 } else if (UseXMMForObjInit) { 6268 xmm_clear_mem(base, cnt, tmp, xtmp, mask); 6269 } else { 6270 NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM 6271 rep_stos(); 6272 } 6273 6274 BIND(DONE); 6275 } 6276 6277 #endif //COMPILER2_OR_JVMCI 6278 6279 6280 void MacroAssembler::generate_fill(BasicType t, bool aligned, 6281 Register to, Register value, Register count, 6282 Register rtmp, XMMRegister xtmp) { 6283 ShortBranchVerifier sbv(this); 6284 assert_different_registers(to, value, count, rtmp); 6285 Label L_exit; 6286 Label L_fill_2_bytes, L_fill_4_bytes; 6287 6288 #if defined(COMPILER2) && defined(_LP64) 6289 if(MaxVectorSize >=32 && 6290 VM_Version::supports_avx512vlbw() && 6291 VM_Version::supports_bmi2()) { 6292 generate_fill_avx3(t, to, value, count, rtmp, xtmp); 6293 return; 6294 } 6295 #endif 6296 6297 int shift = -1; 6298 switch (t) { 6299 case T_BYTE: 6300 shift = 2; 6301 break; 6302 case T_SHORT: 6303 shift = 1; 6304 break; 6305 case T_INT: 6306 shift = 0; 6307 break; 6308 default: ShouldNotReachHere(); 6309 } 6310 6311 if (t == T_BYTE) { 6312 andl(value, 0xff); 6313 movl(rtmp, value); 6314 shll(rtmp, 8); 6315 orl(value, rtmp); 6316 } 6317 if (t == T_SHORT) { 6318 andl(value, 0xffff); 6319 } 6320 if (t == T_BYTE || t == T_SHORT) { 6321 movl(rtmp, value); 6322 shll(rtmp, 16); 6323 orl(value, rtmp); 6324 } 6325 6326 cmpptr(count, 2<<shift); // Short arrays (< 8 bytes) fill by element 6327 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp 6328 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) { 6329 Label L_skip_align2; 6330 // align source address at 4 bytes address boundary 6331 if (t == T_BYTE) { 6332 Label L_skip_align1; 6333 // One byte misalignment happens only for byte arrays 6334 testptr(to, 1); 6335 jccb(Assembler::zero, L_skip_align1); 6336 movb(Address(to, 0), value); 6337 increment(to); 6338 decrement(count); 6339 BIND(L_skip_align1); 6340 } 6341 // Two bytes misalignment happens only for byte and short (char) arrays 6342 testptr(to, 2); 6343 jccb(Assembler::zero, L_skip_align2); 6344 movw(Address(to, 0), value); 6345 addptr(to, 2); 6346 subptr(count, 1<<(shift-1)); 6347 BIND(L_skip_align2); 6348 } 6349 if (UseSSE < 2) { 6350 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes; 6351 // Fill 32-byte 
chunks 6352 subptr(count, 8 << shift); 6353 jcc(Assembler::less, L_check_fill_8_bytes); 6354 align(16); 6355 6356 BIND(L_fill_32_bytes_loop); 6357 6358 for (int i = 0; i < 32; i += 4) { 6359 movl(Address(to, i), value); 6360 } 6361 6362 addptr(to, 32); 6363 subptr(count, 8 << shift); 6364 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop); 6365 BIND(L_check_fill_8_bytes); 6366 addptr(count, 8 << shift); 6367 jccb(Assembler::zero, L_exit); 6368 jmpb(L_fill_8_bytes); 6369 6370 // 6371 // length is too short, just fill qwords 6372 // 6373 BIND(L_fill_8_bytes_loop); 6374 movl(Address(to, 0), value); 6375 movl(Address(to, 4), value); 6376 addptr(to, 8); 6377 BIND(L_fill_8_bytes); 6378 subptr(count, 1 << (shift + 1)); 6379 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop); 6380 // fall through to fill 4 bytes 6381 } else { 6382 Label L_fill_32_bytes; 6383 if (!UseUnalignedLoadStores) { 6384 // align to 8 bytes, we know we are 4 byte aligned to start 6385 testptr(to, 4); 6386 jccb(Assembler::zero, L_fill_32_bytes); 6387 movl(Address(to, 0), value); 6388 addptr(to, 4); 6389 subptr(count, 1<<shift); 6390 } 6391 BIND(L_fill_32_bytes); 6392 { 6393 assert( UseSSE >= 2, "supported cpu only" ); 6394 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes; 6395 movdl(xtmp, value); 6396 if (UseAVX >= 2 && UseUnalignedLoadStores) { 6397 Label L_check_fill_32_bytes; 6398 if (UseAVX > 2) { 6399 // Fill 64-byte chunks 6400 Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2; 6401 6402 // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2 6403 cmpptr(count, VM_Version::avx3_threshold()); 6404 jccb(Assembler::below, L_check_fill_64_bytes_avx2); 6405 6406 vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit); 6407 6408 subptr(count, 16 << shift); 6409 jccb(Assembler::less, L_check_fill_32_bytes); 6410 align(16); 6411 6412 BIND(L_fill_64_bytes_loop_avx3); 6413 evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit); 6414 addptr(to, 64); 6415 subptr(count, 16 << shift); 6416 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3); 6417 jmpb(L_check_fill_32_bytes); 6418 6419 BIND(L_check_fill_64_bytes_avx2); 6420 } 6421 // Fill 64-byte chunks 6422 Label L_fill_64_bytes_loop; 6423 vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit); 6424 6425 subptr(count, 16 << shift); 6426 jcc(Assembler::less, L_check_fill_32_bytes); 6427 align(16); 6428 6429 BIND(L_fill_64_bytes_loop); 6430 vmovdqu(Address(to, 0), xtmp); 6431 vmovdqu(Address(to, 32), xtmp); 6432 addptr(to, 64); 6433 subptr(count, 16 << shift); 6434 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop); 6435 6436 BIND(L_check_fill_32_bytes); 6437 addptr(count, 8 << shift); 6438 jccb(Assembler::less, L_check_fill_8_bytes); 6439 vmovdqu(Address(to, 0), xtmp); 6440 addptr(to, 32); 6441 subptr(count, 8 << shift); 6442 6443 BIND(L_check_fill_8_bytes); 6444 // clean upper bits of YMM registers 6445 movdl(xtmp, value); 6446 pshufd(xtmp, xtmp, 0); 6447 } else { 6448 // Fill 32-byte chunks 6449 pshufd(xtmp, xtmp, 0); 6450 6451 subptr(count, 8 << shift); 6452 jcc(Assembler::less, L_check_fill_8_bytes); 6453 align(16); 6454 6455 BIND(L_fill_32_bytes_loop); 6456 6457 if (UseUnalignedLoadStores) { 6458 movdqu(Address(to, 0), xtmp); 6459 movdqu(Address(to, 16), xtmp); 6460 } else { 6461 movq(Address(to, 0), xtmp); 6462 movq(Address(to, 8), xtmp); 6463 movq(Address(to, 16), xtmp); 6464 movq(Address(to, 24), xtmp); 6465 } 6466 6467 addptr(to, 32); 6468 subptr(count, 8 << shift); 6469 jcc(Assembler::greaterEqual, 
L_fill_32_bytes_loop); 6470 6471 BIND(L_check_fill_8_bytes); 6472 } 6473 addptr(count, 8 << shift); 6474 jccb(Assembler::zero, L_exit); 6475 jmpb(L_fill_8_bytes); 6476 6477 // 6478 // length is too short, just fill qwords 6479 // 6480 BIND(L_fill_8_bytes_loop); 6481 movq(Address(to, 0), xtmp); 6482 addptr(to, 8); 6483 BIND(L_fill_8_bytes); 6484 subptr(count, 1 << (shift + 1)); 6485 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop); 6486 } 6487 } 6488 // fill trailing 4 bytes 6489 BIND(L_fill_4_bytes); 6490 testl(count, 1<<shift); 6491 jccb(Assembler::zero, L_fill_2_bytes); 6492 movl(Address(to, 0), value); 6493 if (t == T_BYTE || t == T_SHORT) { 6494 Label L_fill_byte; 6495 addptr(to, 4); 6496 BIND(L_fill_2_bytes); 6497 // fill trailing 2 bytes 6498 testl(count, 1<<(shift-1)); 6499 jccb(Assembler::zero, L_fill_byte); 6500 movw(Address(to, 0), value); 6501 if (t == T_BYTE) { 6502 addptr(to, 2); 6503 BIND(L_fill_byte); 6504 // fill trailing byte 6505 testl(count, 1); 6506 jccb(Assembler::zero, L_exit); 6507 movb(Address(to, 0), value); 6508 } else { 6509 BIND(L_fill_byte); 6510 } 6511 } else { 6512 BIND(L_fill_2_bytes); 6513 } 6514 BIND(L_exit); 6515 } 6516 6517 void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) { 6518 switch(type) { 6519 case T_BYTE: 6520 case T_BOOLEAN: 6521 evpbroadcastb(dst, src, vector_len); 6522 break; 6523 case T_SHORT: 6524 case T_CHAR: 6525 evpbroadcastw(dst, src, vector_len); 6526 break; 6527 case T_INT: 6528 case T_FLOAT: 6529 evpbroadcastd(dst, src, vector_len); 6530 break; 6531 case T_LONG: 6532 case T_DOUBLE: 6533 evpbroadcastq(dst, src, vector_len); 6534 break; 6535 default: 6536 fatal("Unhandled type : %s", type2name(type)); 6537 break; 6538 } 6539 } 6540 6541 // encode char[] to byte[] in ISO_8859_1 or ASCII 6542 //@IntrinsicCandidate 6543 //private static int implEncodeISOArray(byte[] sa, int sp, 6544 //byte[] da, int dp, int len) { 6545 // int i = 0; 6546 // for (; i < len; i++) { 6547 // char c = StringUTF16.getChar(sa, sp++); 6548 // if (c > '\u00FF') 6549 // break; 6550 // da[dp++] = (byte)c; 6551 // } 6552 // return i; 6553 //} 6554 // 6555 //@IntrinsicCandidate 6556 //private static int implEncodeAsciiArray(char[] sa, int sp, 6557 // byte[] da, int dp, int len) { 6558 // int i = 0; 6559 // for (; i < len; i++) { 6560 // char c = sa[sp++]; 6561 // if (c >= '\u0080') 6562 // break; 6563 // da[dp++] = (byte)c; 6564 // } 6565 // return i; 6566 //} 6567 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len, 6568 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 6569 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 6570 Register tmp5, Register result, bool ascii) { 6571 6572 // rsi: src 6573 // rdi: dst 6574 // rdx: len 6575 // rcx: tmp5 6576 // rax: result 6577 ShortBranchVerifier sbv(this); 6578 assert_different_registers(src, dst, len, tmp5, result); 6579 Label L_done, L_copy_1_char, L_copy_1_char_exit; 6580 6581 int mask = ascii ? 0xff80ff80 : 0xff00ff00; 6582 int short_mask = ascii ? 
0xff80 : 0xff00; 6583 6584 // set result 6585 xorl(result, result); 6586 // check for zero length 6587 testl(len, len); 6588 jcc(Assembler::zero, L_done); 6589 6590 movl(result, len); 6591 6592 // Setup pointers 6593 lea(src, Address(src, len, Address::times_2)); // char[] 6594 lea(dst, Address(dst, len, Address::times_1)); // byte[] 6595 negptr(len); 6596 6597 if (UseSSE42Intrinsics || UseAVX >= 2) { 6598 Label L_copy_8_chars, L_copy_8_chars_exit; 6599 Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit; 6600 6601 if (UseAVX >= 2) { 6602 Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit; 6603 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector 6604 movdl(tmp1Reg, tmp5); 6605 vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit); 6606 jmp(L_chars_32_check); 6607 6608 bind(L_copy_32_chars); 6609 vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64)); 6610 vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32)); 6611 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1); 6612 vptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector 6613 jccb(Assembler::notZero, L_copy_32_chars_exit); 6614 vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1); 6615 vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1); 6616 vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg); 6617 6618 bind(L_chars_32_check); 6619 addptr(len, 32); 6620 jcc(Assembler::lessEqual, L_copy_32_chars); 6621 6622 bind(L_copy_32_chars_exit); 6623 subptr(len, 16); 6624 jccb(Assembler::greater, L_copy_16_chars_exit); 6625 6626 } else if (UseSSE42Intrinsics) { 6627 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector 6628 movdl(tmp1Reg, tmp5); 6629 pshufd(tmp1Reg, tmp1Reg, 0); 6630 jmpb(L_chars_16_check); 6631 } 6632 6633 bind(L_copy_16_chars); 6634 if (UseAVX >= 2) { 6635 vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32)); 6636 vptest(tmp2Reg, tmp1Reg); 6637 jcc(Assembler::notZero, L_copy_16_chars_exit); 6638 vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1); 6639 vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1); 6640 } else { 6641 if (UseAVX > 0) { 6642 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32)); 6643 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16)); 6644 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0); 6645 } else { 6646 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32)); 6647 por(tmp2Reg, tmp3Reg); 6648 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16)); 6649 por(tmp2Reg, tmp4Reg); 6650 } 6651 ptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector 6652 jccb(Assembler::notZero, L_copy_16_chars_exit); 6653 packuswb(tmp3Reg, tmp4Reg); 6654 } 6655 movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg); 6656 6657 bind(L_chars_16_check); 6658 addptr(len, 16); 6659 jcc(Assembler::lessEqual, L_copy_16_chars); 6660 6661 bind(L_copy_16_chars_exit); 6662 if (UseAVX >= 2) { 6663 // clean upper bits of YMM registers 6664 vpxor(tmp2Reg, tmp2Reg); 6665 vpxor(tmp3Reg, tmp3Reg); 6666 vpxor(tmp4Reg, tmp4Reg); 6667 movdl(tmp1Reg, tmp5); 6668 pshufd(tmp1Reg, tmp1Reg, 0); 6669 } 6670 subptr(len, 8); 6671 jccb(Assembler::greater, L_copy_8_chars_exit); 6672 6673 bind(L_copy_8_chars); 6674 movdqu(tmp3Reg, Address(src, len, Address::times_2, -16)); 6675 ptest(tmp3Reg, tmp1Reg); 6676 jccb(Assembler::notZero, L_copy_8_chars_exit); 6677 packuswb(tmp3Reg, tmp1Reg); 6678 movq(Address(dst, len, Address::times_1, -8), tmp3Reg); 6679 addptr(len, 8); 6680 
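// len <= 0 here means at least 8 unprocessed chars remain (len carries
// a -8 bias from the subptr above), so loop for another batch.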
jccb(Assembler::lessEqual, L_copy_8_chars); 6681 6682 bind(L_copy_8_chars_exit); 6683 subptr(len, 8); 6684 jccb(Assembler::zero, L_done); 6685 } 6686 6687 bind(L_copy_1_char); 6688 load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0)); 6689 testl(tmp5, short_mask); // check if Unicode or non-ASCII char 6690 jccb(Assembler::notZero, L_copy_1_char_exit); 6691 movb(Address(dst, len, Address::times_1, 0), tmp5); 6692 addptr(len, 1); 6693 jccb(Assembler::less, L_copy_1_char); 6694 6695 bind(L_copy_1_char_exit); 6696 addptr(result, len); // len is the negative count of unprocessed elements 6697 6698 bind(L_done); 6699 } 6700 6701 #ifdef _LP64 6702 /** 6703 * Helper for multiply_to_len(). 6704 */ 6705 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) { 6706 addq(dest_lo, src1); 6707 adcq(dest_hi, 0); 6708 addq(dest_lo, src2); 6709 adcq(dest_hi, 0); 6710 } 6711 6712 /** 6713 * Multiply 64 bit by 64 bit first loop. 6714 */ 6715 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 6716 Register y, Register y_idx, Register z, 6717 Register carry, Register product, 6718 Register idx, Register kdx) { 6719 // 6720 // jlong carry, x[], y[], z[]; 6721 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { 6722 // huge_128 product = y[idx] * x[xstart] + carry; 6723 // z[kdx] = (jlong)product; 6724 // carry = (jlong)(product >>> 64); 6725 // } 6726 // z[xstart] = carry; 6727 // 6728 6729 Label L_first_loop, L_first_loop_exit; 6730 Label L_one_x, L_one_y, L_multiply; 6731 6732 decrementl(xstart); 6733 jcc(Assembler::negative, L_one_x); 6734 6735 movq(x_xstart, Address(x, xstart, Address::times_4, 0)); 6736 rorq(x_xstart, 32); // convert big-endian to little-endian 6737 6738 bind(L_first_loop); 6739 decrementl(idx); 6740 jcc(Assembler::negative, L_first_loop_exit); 6741 decrementl(idx); 6742 jcc(Assembler::negative, L_one_y); 6743 movq(y_idx, Address(y, idx, Address::times_4, 0)); 6744 rorq(y_idx, 32); // convert big-endian to little-endian 6745 bind(L_multiply); 6746 movq(product, x_xstart); 6747 mulq(y_idx); // product(rax) * y_idx -> rdx:rax 6748 addq(product, carry); 6749 adcq(rdx, 0); 6750 subl(kdx, 2); 6751 movl(Address(z, kdx, Address::times_4, 4), product); 6752 shrq(product, 32); 6753 movl(Address(z, kdx, Address::times_4, 0), product); 6754 movq(carry, rdx); 6755 jmp(L_first_loop); 6756 6757 bind(L_one_y); 6758 movl(y_idx, Address(y, 0)); 6759 jmp(L_multiply); 6760 6761 bind(L_one_x); 6762 movl(x_xstart, Address(x, 0)); 6763 jmp(L_first_loop); 6764 6765 bind(L_first_loop_exit); 6766 } 6767 6768 /** 6769 * Multiply 64 bit by 64 bit and add 128 bit.
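 * (One step of the schoolbook multiply: the code below computes
 * rdx:rax = x_xstart * y[idx], then folds in z[idx] and the incoming
 * carry via add2_with_carry, storing the low word back into z and
 * leaving the high word in rdx for the caller to pick up as the next
 * carry.)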
6770 */ 6771 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z, 6772 Register yz_idx, Register idx, 6773 Register carry, Register product, int offset) { 6774 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry; 6775 // z[kdx] = (jlong)product; 6776 6777 movq(yz_idx, Address(y, idx, Address::times_4, offset)); 6778 rorq(yz_idx, 32); // convert big-endian to little-endian 6779 movq(product, x_xstart); 6780 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 6781 movq(yz_idx, Address(z, idx, Address::times_4, offset)); 6782 rorq(yz_idx, 32); // convert big-endian to little-endian 6783 6784 add2_with_carry(rdx, product, carry, yz_idx); 6785 6786 movl(Address(z, idx, Address::times_4, offset+4), product); 6787 shrq(product, 32); 6788 movl(Address(z, idx, Address::times_4, offset), product); 6789 6790 } 6791 6792 /** 6793 * Multiply 128 bit by 128 bit. Unrolled inner loop. 6794 */ 6795 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 6796 Register yz_idx, Register idx, Register jdx, 6797 Register carry, Register product, 6798 Register carry2) { 6799 // jlong carry, x[], y[], z[]; 6800 // int kdx = ystart+1; 6801 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 6802 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry; 6803 // z[kdx+idx+1] = (jlong)product; 6804 // jlong carry2 = (jlong)(product >>> 64); 6805 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2; 6806 // z[kdx+idx] = (jlong)product; 6807 // carry = (jlong)(product >>> 64); 6808 // } 6809 // idx += 2; 6810 // if (idx > 0) { 6811 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry; 6812 // z[kdx+idx] = (jlong)product; 6813 // carry = (jlong)(product >>> 64); 6814 // } 6815 // 6816 6817 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 6818 6819 movl(jdx, idx); 6820 andl(jdx, 0xFFFFFFFC); 6821 shrl(jdx, 2); 6822 6823 bind(L_third_loop); 6824 subl(jdx, 1); 6825 jcc(Assembler::negative, L_third_loop_exit); 6826 subl(idx, 4); 6827 6828 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8); 6829 movq(carry2, rdx); 6830 6831 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0); 6832 movq(carry, rdx); 6833 jmp(L_third_loop); 6834 6835 bind (L_third_loop_exit); 6836 6837 andl (idx, 0x3); 6838 jcc(Assembler::zero, L_post_third_loop_done); 6839 6840 Label L_check_1; 6841 subl(idx, 2); 6842 jcc(Assembler::negative, L_check_1); 6843 6844 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0); 6845 movq(carry, rdx); 6846 6847 bind (L_check_1); 6848 addl (idx, 0x2); 6849 andl (idx, 0x1); 6850 subl(idx, 1); 6851 jcc(Assembler::negative, L_post_third_loop_done); 6852 6853 movl(yz_idx, Address(y, idx, Address::times_4, 0)); 6854 movq(product, x_xstart); 6855 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 6856 movl(yz_idx, Address(z, idx, Address::times_4, 0)); 6857 6858 add2_with_carry(rdx, product, yz_idx, carry); 6859 6860 movl(Address(z, idx, Address::times_4, 0), product); 6861 shrq(product, 32); 6862 6863 shlq(rdx, 32); 6864 orq(product, rdx); 6865 movq(carry, product); 6866 6867 bind(L_post_third_loop_done); 6868 } 6869 6870 /** 6871 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop. 
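 * (mulxq takes its implicit multiplicand in rdx and, unlike mulq, leaves
 * the flags untouched; on CPUs with ADX, adcxq and adoxq then maintain
 * two independent carry chains in CF and OF, which is what lets the two
 * 64x64 multiply-accumulates below interleave without saving flags.)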
6872 * 6873 */ 6874 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z, 6875 Register carry, Register carry2, 6876 Register idx, Register jdx, 6877 Register yz_idx1, Register yz_idx2, 6878 Register tmp, Register tmp3, Register tmp4) { 6879 assert(UseBMI2Instructions, "should be used only when BMI2 is available"); 6880 6881 // jlong carry, x[], y[], z[]; 6882 // int kdx = ystart+1; 6883 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 6884 // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry; 6885 // jlong carry2 = (jlong)(tmp3 >>> 64); 6886 // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2; 6887 // carry = (jlong)(tmp4 >>> 64); 6888 // z[kdx+idx+1] = (jlong)tmp3; 6889 // z[kdx+idx] = (jlong)tmp4; 6890 // } 6891 // idx += 2; 6892 // if (idx > 0) { 6893 // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry; 6894 // z[kdx+idx] = (jlong)yz_idx1; 6895 // carry = (jlong)(yz_idx1 >>> 64); 6896 // } 6897 // 6898 6899 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 6900 6901 movl(jdx, idx); 6902 andl(jdx, 0xFFFFFFFC); 6903 shrl(jdx, 2); 6904 6905 bind(L_third_loop); 6906 subl(jdx, 1); 6907 jcc(Assembler::negative, L_third_loop_exit); 6908 subl(idx, 4); 6909 6910 movq(yz_idx1, Address(y, idx, Address::times_4, 8)); 6911 rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 6912 movq(yz_idx2, Address(y, idx, Address::times_4, 0)); 6913 rorxq(yz_idx2, yz_idx2, 32); 6914 6915 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 6916 mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp 6917 6918 movq(yz_idx1, Address(z, idx, Address::times_4, 8)); 6919 rorxq(yz_idx1, yz_idx1, 32); 6920 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 6921 rorxq(yz_idx2, yz_idx2, 32); 6922 6923 if (VM_Version::supports_adx()) { 6924 adcxq(tmp3, carry); 6925 adoxq(tmp3, yz_idx1); 6926 6927 adcxq(tmp4, tmp); 6928 adoxq(tmp4, yz_idx2); 6929 6930 movl(carry, 0); // does not affect flags 6931 adcxq(carry2, carry); 6932 adoxq(carry2, carry); 6933 } else { 6934 add2_with_carry(tmp4, tmp3, carry, yz_idx1); 6935 add2_with_carry(carry2, tmp4, tmp, yz_idx2); 6936 } 6937 movq(carry, carry2); 6938 6939 movl(Address(z, idx, Address::times_4, 12), tmp3); 6940 shrq(tmp3, 32); 6941 movl(Address(z, idx, Address::times_4, 8), tmp3); 6942 6943 movl(Address(z, idx, Address::times_4, 4), tmp4); 6944 shrq(tmp4, 32); 6945 movl(Address(z, idx, Address::times_4, 0), tmp4); 6946 6947 jmp(L_third_loop); 6948 6949 bind (L_third_loop_exit); 6950 6951 andl (idx, 0x3); 6952 jcc(Assembler::zero, L_post_third_loop_done); 6953 6954 Label L_check_1; 6955 subl(idx, 2); 6956 jcc(Assembler::negative, L_check_1); 6957 6958 movq(yz_idx1, Address(y, idx, Address::times_4, 0)); 6959 rorxq(yz_idx1, yz_idx1, 32); 6960 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 6961 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 6962 rorxq(yz_idx2, yz_idx2, 32); 6963 6964 add2_with_carry(tmp4, tmp3, carry, yz_idx2); 6965 6966 movl(Address(z, idx, Address::times_4, 4), tmp3); 6967 shrq(tmp3, 32); 6968 movl(Address(z, idx, Address::times_4, 0), tmp3); 6969 movq(carry, tmp4); 6970 6971 bind (L_check_1); 6972 addl (idx, 0x2); 6973 andl (idx, 0x1); 6974 subl(idx, 1); 6975 jcc(Assembler::negative, L_post_third_loop_done); 6976 movl(tmp4, Address(y, idx, Address::times_4, 0)); 6977 mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3 6978 movl(tmp4, Address(z, idx, Address::times_4, 0)); 6979 6980 add2_with_carry(carry2, tmp3, tmp4, carry); 6981 6982 movl(Address(z, idx, 
Address::times_4, 0), tmp3); 6983 shrq(tmp3, 32); 6984 6985 shlq(carry2, 32); 6986 orq(tmp3, carry2); 6987 movq(carry, tmp3); 6988 6989 bind(L_post_third_loop_done); 6990 } 6991 6992 /** 6993 * Code for BigInteger::multiplyToLen() intrinsic. 6994 * 6995 * rdi: x 6996 * rax: xlen 6997 * rsi: y 6998 * rcx: ylen 6999 * r8: z 7000 * r11: tmp0 7001 * r12: tmp1 7002 * r13: tmp2 7003 * r14: tmp3 7004 * r15: tmp4 7005 * rbx: tmp5 7006 * 7007 */ 7008 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0, 7009 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) { 7010 ShortBranchVerifier sbv(this); 7011 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx); 7012 7013 push(tmp0); 7014 push(tmp1); 7015 push(tmp2); 7016 push(tmp3); 7017 push(tmp4); 7018 push(tmp5); 7019 7020 push(xlen); 7021 7022 const Register idx = tmp1; 7023 const Register kdx = tmp2; 7024 const Register xstart = tmp3; 7025 7026 const Register y_idx = tmp4; 7027 const Register carry = tmp5; 7028 const Register product = xlen; 7029 const Register x_xstart = tmp0; 7030 7031 // First Loop. 7032 // 7033 // final static long LONG_MASK = 0xffffffffL; 7034 // int xstart = xlen - 1; 7035 // int ystart = ylen - 1; 7036 // long carry = 0; 7037 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { 7038 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 7039 // z[kdx] = (int)product; 7040 // carry = product >>> 32; 7041 // } 7042 // z[xstart] = (int)carry; 7043 // 7044 7045 movl(idx, ylen); // idx = ylen; 7046 lea(kdx, Address(xlen, ylen)); // kdx = xlen+ylen; 7047 xorq(carry, carry); // carry = 0; 7048 7049 Label L_done; 7050 7051 movl(xstart, xlen); 7052 decrementl(xstart); 7053 jcc(Assembler::negative, L_done); 7054 7055 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 7056 7057 Label L_second_loop; 7058 testl(kdx, kdx); 7059 jcc(Assembler::zero, L_second_loop); 7060 7061 Label L_carry; 7062 subl(kdx, 1); 7063 jcc(Assembler::zero, L_carry); 7064 7065 movl(Address(z, kdx, Address::times_4, 0), carry); 7066 shrq(carry, 32); 7067 subl(kdx, 1); 7068 7069 bind(L_carry); 7070 movl(Address(z, kdx, Address::times_4, 0), carry); 7071 7072 // Second and third (nested) loops.
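// (For each remaining 64-bit digit of x, the inner loop re-runs the
// 128 x 128 multiply-accumulate over all of y, threading the carry through
// z; the BMI2 variant keeps x[i] in rdx, as mulxq requires.)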
7073 // 7074 // for (int i = xstart-1; i >= 0; i--) { // Second loop 7075 // carry = 0; 7076 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 7077 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 7078 // (z[k] & LONG_MASK) + carry; 7079 // z[k] = (int)product; 7080 // carry = product >>> 32; 7081 // } 7082 // z[i] = (int)carry; 7083 // } 7084 // 7085 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx 7086 7087 const Register jdx = tmp1; 7088 7089 bind(L_second_loop); 7090 xorl(carry, carry); // carry = 0; 7091 movl(jdx, ylen); // j = ystart+1 7092 7093 subl(xstart, 1); // i = xstart-1; 7094 jcc(Assembler::negative, L_done); 7095 7096 push (z); 7097 7098 Label L_last_x; 7099 lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j 7100 subl(xstart, 1); // i = xstart-1; 7101 jcc(Assembler::negative, L_last_x); 7102 7103 if (UseBMI2Instructions) { 7104 movq(rdx, Address(x, xstart, Address::times_4, 0)); 7105 rorxq(rdx, rdx, 32); // convert big-endian to little-endian 7106 } else { 7107 movq(x_xstart, Address(x, xstart, Address::times_4, 0)); 7108 rorq(x_xstart, 32); // convert big-endian to little-endian 7109 } 7110 7111 Label L_third_loop_prologue; 7112 bind(L_third_loop_prologue); 7113 7114 push (x); 7115 push (xstart); 7116 push (ylen); 7117 7118 7119 if (UseBMI2Instructions) { 7120 multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4); 7121 } else { // !UseBMI2Instructions 7122 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x); 7123 } 7124 7125 pop(ylen); 7126 pop(xlen); 7127 pop(x); 7128 pop(z); 7129 7130 movl(tmp3, xlen); 7131 addl(tmp3, 1); 7132 movl(Address(z, tmp3, Address::times_4, 0), carry); 7133 subl(tmp3, 1); 7134 jccb(Assembler::negative, L_done); 7135 7136 shrq(carry, 32); 7137 movl(Address(z, tmp3, Address::times_4, 0), carry); 7138 jmp(L_second_loop); 7139 7140 // Next infrequent code is moved outside loops. 7141 bind(L_last_x); 7142 if (UseBMI2Instructions) { 7143 movl(rdx, Address(x, 0)); 7144 } else { 7145 movl(x_xstart, Address(x, 0)); 7146 } 7147 jmp(L_third_loop_prologue); 7148 7149 bind(L_done); 7150 7151 pop(xlen); 7152 7153 pop(tmp5); 7154 pop(tmp4); 7155 pop(tmp3); 7156 pop(tmp2); 7157 pop(tmp1); 7158 pop(tmp0); 7159 } 7160 7161 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 7162 Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){ 7163 assert(UseSSE42Intrinsics, "SSE4.2 must be enabled."); 7164 Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP; 7165 Label VECTOR8_TAIL, VECTOR4_TAIL; 7166 Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL; 7167 Label SAME_TILL_END, DONE; 7168 Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL; 7169 7170 //scale is in rcx in both Win64 and Unix 7171 ShortBranchVerifier sbv(this); 7172 7173 shlq(length); 7174 xorq(result, result); 7175 7176 if ((AVX3Threshold == 0) && (UseAVX > 2) && 7177 VM_Version::supports_avx512vlbw()) { 7178 Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL; 7179 7180 cmpq(length, 64); 7181 jcc(Assembler::less, VECTOR32_TAIL); 7182 7183 movq(tmp1, length); 7184 andq(tmp1, 0x3F); // tail count 7185 andq(length, ~(0x3F)); //vector count 7186 7187 bind(VECTOR64_LOOP); 7188 // AVX512 code to compare 64 byte vectors. 
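// evpcmpeqb sets one bit in k7 per byte that compares equal; kortestql
// sets CF only when the 64-bit mask is all ones, so the aboveEqual
// (i.e. carryClear) branch below fires on the first 64-byte block that
// contains any mismatching byte.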
7189 evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit); 7190 evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit); 7191 kortestql(k7, k7); 7192 jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch 7193 addq(result, 64); 7194 subq(length, 64); 7195 jccb(Assembler::notZero, VECTOR64_LOOP); 7196 7197 //bind(VECTOR64_TAIL); 7198 testq(tmp1, tmp1); 7199 jcc(Assembler::zero, SAME_TILL_END); 7200 7201 //bind(VECTOR64_TAIL); 7202 // AVX512 code to compare up to 63 byte vectors. 7203 mov64(tmp2, 0xFFFFFFFFFFFFFFFF); 7204 shlxq(tmp2, tmp2, tmp1); 7205 notq(tmp2); 7206 kmovql(k3, tmp2); 7207 7208 evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit); 7209 evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit); 7210 7211 ktestql(k7, k3); 7212 jcc(Assembler::below, SAME_TILL_END); // not mismatch 7213 7214 bind(VECTOR64_NOT_EQUAL); 7215 kmovql(tmp1, k7); 7216 notq(tmp1); 7217 tzcntq(tmp1, tmp1); 7218 addq(result, tmp1); 7219 shrq(result); 7220 jmp(DONE); 7221 bind(VECTOR32_TAIL); 7222 } 7223 7224 cmpq(length, 8); 7225 jcc(Assembler::equal, VECTOR8_LOOP); 7226 jcc(Assembler::less, VECTOR4_TAIL); 7227 7228 if (UseAVX >= 2) { 7229 Label VECTOR16_TAIL, VECTOR32_LOOP; 7230 7231 cmpq(length, 16); 7232 jcc(Assembler::equal, VECTOR16_LOOP); 7233 jcc(Assembler::less, VECTOR8_LOOP); 7234 7235 cmpq(length, 32); 7236 jccb(Assembler::less, VECTOR16_TAIL); 7237 7238 subq(length, 32); 7239 bind(VECTOR32_LOOP); 7240 vmovdqu(rymm0, Address(obja, result)); 7241 vmovdqu(rymm1, Address(objb, result)); 7242 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit); 7243 vptest(rymm2, rymm2); 7244 jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found 7245 addq(result, 32); 7246 subq(length, 32); 7247 jcc(Assembler::greaterEqual, VECTOR32_LOOP); 7248 addq(length, 32); 7249 jcc(Assembler::equal, SAME_TILL_END); 7250 //falling through if less than 32 bytes left //close the branch here. 
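// The remaining tails step down through 16-, 8-, and 4-byte compares,
// each falling through to the next smaller width until the byte loop
// finishes the job.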
7251 7252 bind(VECTOR16_TAIL); 7253 cmpq(length, 16); 7254 jccb(Assembler::less, VECTOR8_TAIL); 7255 bind(VECTOR16_LOOP); 7256 movdqu(rymm0, Address(obja, result)); 7257 movdqu(rymm1, Address(objb, result)); 7258 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit); 7259 ptest(rymm2, rymm2); 7260 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 7261 addq(result, 16); 7262 subq(length, 16); 7263 jcc(Assembler::equal, SAME_TILL_END); 7264 //falling through if less than 16 bytes left 7265 } else {//regular intrinsics 7266 7267 cmpq(length, 16); 7268 jccb(Assembler::less, VECTOR8_TAIL); 7269 7270 subq(length, 16); 7271 bind(VECTOR16_LOOP); 7272 movdqu(rymm0, Address(obja, result)); 7273 movdqu(rymm1, Address(objb, result)); 7274 pxor(rymm0, rymm1); 7275 ptest(rymm0, rymm0); 7276 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 7277 addq(result, 16); 7278 subq(length, 16); 7279 jccb(Assembler::greaterEqual, VECTOR16_LOOP); 7280 addq(length, 16); 7281 jcc(Assembler::equal, SAME_TILL_END); 7282 //falling through if less than 16 bytes left 7283 } 7284 7285 bind(VECTOR8_TAIL); 7286 cmpq(length, 8); 7287 jccb(Assembler::less, VECTOR4_TAIL); 7288 bind(VECTOR8_LOOP); 7289 movq(tmp1, Address(obja, result)); 7290 movq(tmp2, Address(objb, result)); 7291 xorq(tmp1, tmp2); 7292 testq(tmp1, tmp1); 7293 jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found 7294 addq(result, 8); 7295 subq(length, 8); 7296 jcc(Assembler::equal, SAME_TILL_END); 7297 //falling through if less than 8 bytes left 7298 7299 bind(VECTOR4_TAIL); 7300 cmpq(length, 4); 7301 jccb(Assembler::less, BYTES_TAIL); 7302 bind(VECTOR4_LOOP); 7303 movl(tmp1, Address(obja, result)); 7304 xorl(tmp1, Address(objb, result)); 7305 testl(tmp1, tmp1); 7306 jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found 7307 addq(result, 4); 7308 subq(length, 4); 7309 jcc(Assembler::equal, SAME_TILL_END); 7310 //falling through if less than 4 bytes left 7311 7312 bind(BYTES_TAIL); 7313 bind(BYTES_LOOP); 7314 load_unsigned_byte(tmp1, Address(obja, result)); 7315 load_unsigned_byte(tmp2, Address(objb, result)); 7316 xorl(tmp1, tmp2); 7317 testl(tmp1, tmp1); 7318 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 7319 decq(length); 7320 jcc(Assembler::zero, SAME_TILL_END); 7321 incq(result); 7322 load_unsigned_byte(tmp1, Address(obja, result)); 7323 load_unsigned_byte(tmp2, Address(objb, result)); 7324 xorl(tmp1, tmp2); 7325 testl(tmp1, tmp1); 7326 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 7327 decq(length); 7328 jcc(Assembler::zero, SAME_TILL_END); 7329 incq(result); 7330 load_unsigned_byte(tmp1, Address(obja, result)); 7331 load_unsigned_byte(tmp2, Address(objb, result)); 7332 xorl(tmp1, tmp2); 7333 testl(tmp1, tmp1); 7334 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 7335 jmp(SAME_TILL_END); 7336 7337 if (UseAVX >= 2) { 7338 bind(VECTOR32_NOT_EQUAL); 7339 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit); 7340 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit); 7341 vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit); 7342 vpmovmskb(tmp1, rymm0); 7343 bsfq(tmp1, tmp1); 7344 addq(result, tmp1); 7345 shrq(result); 7346 jmp(DONE); 7347 } 7348 7349 bind(VECTOR16_NOT_EQUAL); 7350 if (UseAVX >= 2) { 7351 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit); 7352 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit); 7353 pxor(rymm0, rymm2); 7354 } else { 7355 pcmpeqb(rymm2, rymm2); 7356 pxor(rymm0, rymm1); 7357 pcmpeqb(rymm0, rymm1); 7358 pxor(rymm0, rymm2); 7359 } 7360 pmovmskb(tmp1, rymm0); 7361 
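// tmp1 now holds one bit per mismatching byte of the 16-byte chunk;
// bsfq finds the first set bit, and the final shrq(result) (by cl, the
// log2 element scale) converts the byte offset back to an element index.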
bsfq(tmp1, tmp1); 7362 addq(result, tmp1); 7363 shrq(result); 7364 jmpb(DONE); 7365 7366 bind(VECTOR8_NOT_EQUAL); 7367 bind(VECTOR4_NOT_EQUAL); 7368 bsfq(tmp1, tmp1); 7369 shrq(tmp1, 3); 7370 addq(result, tmp1); 7371 bind(BYTES_NOT_EQUAL); 7372 shrq(result); 7373 jmpb(DONE); 7374 7375 bind(SAME_TILL_END); 7376 mov64(result, -1); 7377 7378 bind(DONE); 7379 } 7380 7381 //Helper functions for square_to_len() 7382 7383 /** 7384 * Store the squares of x[], right shifted one bit (divided by 2) into z[] 7385 * Preserves x and z and modifies rest of the registers. 7386 */ 7387 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7388 // Perform square and right shift by 1 7389 // Handle odd xlen case first, then for even xlen do the following 7390 // jlong carry = 0; 7391 // for (int j=0, i=0; j < xlen; j+=2, i+=4) { 7392 // huge_128 product = x[j:j+1] * x[j:j+1]; 7393 // z[i:i+1] = (carry << 63) | (jlong)(product >>> 65); 7394 // z[i+2:i+3] = (jlong)(product >>> 1); 7395 // carry = (jlong)product; 7396 // } 7397 7398 xorq(tmp5, tmp5); // carry 7399 xorq(rdxReg, rdxReg); 7400 xorl(tmp1, tmp1); // index for x 7401 xorl(tmp4, tmp4); // index for z 7402 7403 Label L_first_loop, L_first_loop_exit; 7404 7405 testl(xlen, 1); 7406 jccb(Assembler::zero, L_first_loop); //jump if xlen is even 7407 7408 // Square and right shift by 1 the odd element using 32 bit multiply 7409 movl(raxReg, Address(x, tmp1, Address::times_4, 0)); 7410 imulq(raxReg, raxReg); 7411 shrq(raxReg, 1); 7412 adcq(tmp5, 0); 7413 movq(Address(z, tmp4, Address::times_4, 0), raxReg); 7414 incrementl(tmp1); 7415 addl(tmp4, 2); 7416 7417 // Square and right shift by 1 the rest using 64 bit multiply 7418 bind(L_first_loop); 7419 cmpptr(tmp1, xlen); 7420 jccb(Assembler::equal, L_first_loop_exit); 7421 7422 // Square 7423 movq(raxReg, Address(x, tmp1, Address::times_4, 0)); 7424 rorq(raxReg, 32); // convert big-endian to little-endian 7425 mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax 7426 7427 // Right shift by 1 and save carry 7428 shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1 7429 rcrq(rdxReg, 1); 7430 rcrq(raxReg, 1); 7431 adcq(tmp5, 0); 7432 7433 // Store result in z 7434 movq(Address(z, tmp4, Address::times_4, 0), rdxReg); 7435 movq(Address(z, tmp4, Address::times_4, 8), raxReg); 7436 7437 // Update indices for x and z 7438 addl(tmp1, 2); 7439 addl(tmp4, 4); 7440 jmp(L_first_loop); 7441 7442 bind(L_first_loop_exit); 7443 } 7444 7445 7446 /** 7447 * Perform the following multiply add operation using BMI2 instructions 7448 * carry:sum = sum + op1*op2 + carry 7449 * op2 should be in rdx 7450 * op2 is preserved, all other registers are modified 7451 */ 7452 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) { 7453 // assert op2 is rdx 7454 mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1 7455 addq(sum, carry); 7456 adcq(tmp2, 0); 7457 addq(sum, op1); 7458 adcq(tmp2, 0); 7459 movq(carry, tmp2); 7460 } 7461 7462 /** 7463 * Perform the following multiply add operation: 7464 * carry:sum = sum + op1*op2 + carry 7465 * Preserves op1, op2 and modifies rest of registers 7466 */ 7467 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) { 7468 // rdx:rax = op1 * op2 7469 movq(raxReg, op2); 7470 mulq(op1); 7471 7472 // rdx:rax = sum + carry + rdx:rax 7473 addq(sum, carry); 7474 
adcq(rdxReg, 0); 7475 addq(sum, raxReg); 7476 adcq(rdxReg, 0); 7477 7478 // carry:sum = rdx:sum 7479 movq(carry, rdxReg); 7480 } 7481 7482 /** 7483 * Add 64 bit long carry into z[] with carry propagation. 7484 * Preserves z and carry register values and modifies rest of registers. 7485 * 7486 */ 7487 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) { 7488 Label L_fourth_loop, L_fourth_loop_exit; 7489 7490 movl(tmp1, 1); 7491 subl(zlen, 2); 7492 addq(Address(z, zlen, Address::times_4, 0), carry); 7493 7494 bind(L_fourth_loop); 7495 jccb(Assembler::carryClear, L_fourth_loop_exit); 7496 subl(zlen, 2); 7497 jccb(Assembler::negative, L_fourth_loop_exit); 7498 addq(Address(z, zlen, Address::times_4, 0), tmp1); 7499 jmp(L_fourth_loop); 7500 bind(L_fourth_loop_exit); 7501 } 7502 7503 /** 7504 * Shift z[] left by 1 bit. 7505 * Preserves x, len, z and zlen registers and modifies rest of the registers. 7506 * 7507 */ 7508 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) { 7509 7510 Label L_fifth_loop, L_fifth_loop_exit; 7511 7512 // Fifth loop 7513 // Perform primitiveLeftShift(z, zlen, 1) 7514 7515 const Register prev_carry = tmp1; 7516 const Register new_carry = tmp4; 7517 const Register value = tmp2; 7518 const Register zidx = tmp3; 7519 7520 // int zidx, carry; 7521 // long value; 7522 // carry = 0; 7523 // for (zidx = zlen-2; zidx >=0; zidx -= 2) { 7524 // (carry:value) = (z[i] << 1) | carry ; 7525 // z[i] = value; 7526 // } 7527 7528 movl(zidx, zlen); 7529 xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register 7530 7531 bind(L_fifth_loop); 7532 decl(zidx); // Use decl to preserve carry flag 7533 decl(zidx); 7534 jccb(Assembler::negative, L_fifth_loop_exit); 7535 7536 if (UseBMI2Instructions) { 7537 movq(value, Address(z, zidx, Address::times_4, 0)); 7538 rclq(value, 1); 7539 rorxq(value, value, 32); 7540 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 7541 } 7542 else { 7543 // clear new_carry 7544 xorl(new_carry, new_carry); 7545 7546 // Shift z[i] by 1, or in previous carry and save new carry 7547 movq(value, Address(z, zidx, Address::times_4, 0)); 7548 shlq(value, 1); 7549 adcl(new_carry, 0); 7550 7551 orq(value, prev_carry); 7552 rorq(value, 0x20); 7553 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 7554 7555 // Set previous carry = new carry 7556 movl(prev_carry, new_carry); 7557 } 7558 jmp(L_fifth_loop); 7559 7560 bind(L_fifth_loop_exit); 7561 } 7562 7563 7564 /** 7565 * Code for BigInteger::squareToLen() intrinsic 7566 * 7567 * rdi: x 7568 * rsi: len 7569 * r8: z 7570 * rcx: zlen 7571 * r12: tmp1 7572 * r13: tmp2 7573 * r14: tmp3 7574 * r15: tmp4 7575 * rbx: tmp5 7576 * 7577 */ 7578 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7579 7580 Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply; 7581 push(tmp1); 7582 push(tmp2); 7583 push(tmp3); 7584 push(tmp4); 7585 push(tmp5); 7586 7587 // First loop 7588 // Store the squares, right shifted one bit (i.e., divided by 2). 7589 square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg); 7590 7591 // Add in off-diagonal sums. 7592 // 7593 // Second, third (nested) and fourth loops. 
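// (Recall (sum x_i*B^i)^2 = sum x_i^2*B^(2i) + 2*sum_{i<j} x_i*x_j*B^(i+j):
// the squares were stored halved by square_rshift above, the single
// off-diagonal products are added below, and the final lshift_by_1 doubles
// everything back, with the lost low bit of x[len-1]^2 patched at the end.)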
7594 // zlen +=2; 7595 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) { 7596 // carry = 0; 7597 // long op2 = x[xidx:xidx+1]; 7598 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) { 7599 // k -= 2; 7600 // long op1 = x[j:j+1]; 7601 // long sum = z[k:k+1]; 7602 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs); 7603 // z[k:k+1] = sum; 7604 // } 7605 // add_one_64(z, k, carry, tmp_regs); 7606 // } 7607 7608 const Register carry = tmp5; 7609 const Register sum = tmp3; 7610 const Register op1 = tmp4; 7611 Register op2 = tmp2; 7612 7613 push(zlen); 7614 push(len); 7615 addl(zlen,2); 7616 bind(L_second_loop); 7617 xorq(carry, carry); 7618 subl(zlen, 4); 7619 subl(len, 2); 7620 push(zlen); 7621 push(len); 7622 cmpl(len, 0); 7623 jccb(Assembler::lessEqual, L_second_loop_exit); 7624 7625 // Multiply an array by one 64 bit long. 7626 if (UseBMI2Instructions) { 7627 op2 = rdxReg; 7628 movq(op2, Address(x, len, Address::times_4, 0)); 7629 rorxq(op2, op2, 32); 7630 } 7631 else { 7632 movq(op2, Address(x, len, Address::times_4, 0)); 7633 rorq(op2, 32); 7634 } 7635 7636 bind(L_third_loop); 7637 decrementl(len); 7638 jccb(Assembler::negative, L_third_loop_exit); 7639 decrementl(len); 7640 jccb(Assembler::negative, L_last_x); 7641 7642 movq(op1, Address(x, len, Address::times_4, 0)); 7643 rorq(op1, 32); 7644 7645 bind(L_multiply); 7646 subl(zlen, 2); 7647 movq(sum, Address(z, zlen, Address::times_4, 0)); 7648 7649 // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry. 7650 if (UseBMI2Instructions) { 7651 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2); 7652 } 7653 else { 7654 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7655 } 7656 7657 movq(Address(z, zlen, Address::times_4, 0), sum); 7658 7659 jmp(L_third_loop); 7660 bind(L_third_loop_exit); 7661 7662 // Fourth loop 7663 // Add 64 bit long carry into z with carry propagation. 7664 // Uses the offset-adjusted zlen. 7665 add_one_64(z, zlen, carry, tmp1); 7666 7667 pop(len); 7668 pop(zlen); 7669 jmp(L_second_loop); 7670 7671 // Next infrequent code is moved outside loops. 7672 bind(L_last_x); 7673 movl(op1, Address(x, 0)); 7674 jmp(L_multiply); 7675 7676 bind(L_second_loop_exit); 7677 pop(len); 7678 pop(zlen); 7679 pop(len); 7680 pop(zlen); 7681 7682 // Fifth loop 7683 // Shift z left 1 bit. 7684 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4); 7685 7686 // z[zlen-1] |= x[len-1] & 1; 7687 movl(tmp3, Address(x, len, Address::times_4, -4)); 7688 andl(tmp3, 1); 7689 orl(Address(z, zlen, Address::times_4, -4), tmp3); 7690 7691 pop(tmp5); 7692 pop(tmp4); 7693 pop(tmp3); 7694 pop(tmp2); 7695 pop(tmp1); 7696 } 7697 7698 /** 7699 * Helper function for mul_add(). 7700 * Multiply the in[] by int k and add to out[] starting at offset offs using 7701 * 128 bit by 32 bit multiply and return the carry in tmp5. 7702 * Only the quad-int-aligned length of in[] is operated on in this function. 7703 * k is in rdxReg for BMI2Instructions, for others it is in tmp2. 7704 * This function preserves out, in and k registers. 7705 * len and offset point to the appropriate index in "in" and "out" respectively. 7706 * tmp5 has the carry. 7707 * other registers are temporary and are modified.
7708 * 7709 */ 7710 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in, 7711 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3, 7712 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7713 7714 Label L_first_loop, L_first_loop_exit; 7715 7716 movl(tmp1, len); 7717 shrl(tmp1, 2); 7718 7719 bind(L_first_loop); 7720 subl(tmp1, 1); 7721 jccb(Assembler::negative, L_first_loop_exit); 7722 7723 subl(len, 4); 7724 subl(offset, 4); 7725 7726 Register op2 = tmp2; 7727 const Register sum = tmp3; 7728 const Register op1 = tmp4; 7729 const Register carry = tmp5; 7730 7731 if (UseBMI2Instructions) { 7732 op2 = rdxReg; 7733 } 7734 7735 movq(op1, Address(in, len, Address::times_4, 8)); 7736 rorq(op1, 32); 7737 movq(sum, Address(out, offset, Address::times_4, 8)); 7738 rorq(sum, 32); 7739 if (UseBMI2Instructions) { 7740 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7741 } 7742 else { 7743 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7744 } 7745 // Store back in big endian from little endian 7746 rorq(sum, 0x20); 7747 movq(Address(out, offset, Address::times_4, 8), sum); 7748 7749 movq(op1, Address(in, len, Address::times_4, 0)); 7750 rorq(op1, 32); 7751 movq(sum, Address(out, offset, Address::times_4, 0)); 7752 rorq(sum, 32); 7753 if (UseBMI2Instructions) { 7754 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7755 } 7756 else { 7757 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7758 } 7759 // Store back in big endian from little endian 7760 rorq(sum, 0x20); 7761 movq(Address(out, offset, Address::times_4, 0), sum); 7762 7763 jmp(L_first_loop); 7764 bind(L_first_loop_exit); 7765 } 7766 7767 /** 7768 * Code for BigInteger::mulAdd() intrinsic 7769 * 7770 * rdi: out 7771 * rsi: in 7772 * r11: offs (out.length - offset) 7773 * rcx: len 7774 * r8: k 7775 * r12: tmp1 7776 * r13: tmp2 7777 * r14: tmp3 7778 * r15: tmp4 7779 * rbx: tmp5 7780 * Multiply the in[] by word k and add to out[], return the carry in rax 7781 */ 7782 void MacroAssembler::mul_add(Register out, Register in, Register offs, 7783 Register len, Register k, Register tmp1, Register tmp2, Register tmp3, 7784 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7785 7786 Label L_carry, L_last_in, L_done; 7787 7788 // carry = 0; 7789 // for (int j=len-1; j >= 0; j--) { 7790 // long product = (in[j] & LONG_MASK) * kLong + 7791 // (out[offs] & LONG_MASK) + carry; 7792 // out[offs--] = (int)product; 7793 // carry = product >>> 32; 7794 // } 7795 // 7796 push(tmp1); 7797 push(tmp2); 7798 push(tmp3); 7799 push(tmp4); 7800 push(tmp5); 7801 7802 Register op2 = tmp2; 7803 const Register sum = tmp3; 7804 const Register op1 = tmp4; 7805 const Register carry = tmp5; 7806 7807 if (UseBMI2Instructions) { 7808 op2 = rdxReg; 7809 movl(op2, k); 7810 } 7811 else { 7812 movl(op2, k); 7813 } 7814 7815 xorq(carry, carry); 7816 7817 //First loop 7818 7819 //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply 7820 //The carry is in tmp5 7821 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg); 7822 7823 //Multiply the trailing in[] entry using 64 bit by 32 bit, if any 7824 decrementl(len); 7825 jccb(Assembler::negative, L_carry); 7826 decrementl(len); 7827 jccb(Assembler::negative, L_last_in); 7828 7829 movq(op1, Address(in, len, Address::times_4, 0)); 7830 rorq(op1, 32); 7831 7832 subl(offs, 2); 7833 movq(sum, Address(out, offs, Address::times_4, 0)); 7834 rorq(sum, 32); 7835 7836 if (UseBMI2Instructions) { 7837 
multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7838 } 7839 else { 7840 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7841 } 7842 7843 // Store back in big endian from little endian 7844 rorq(sum, 0x20); 7845 movq(Address(out, offs, Address::times_4, 0), sum); 7846 7847 testl(len, len); 7848 jccb(Assembler::zero, L_carry); 7849 7850 //Multiply the last in[] entry, if any 7851 bind(L_last_in); 7852 movl(op1, Address(in, 0)); 7853 movl(sum, Address(out, offs, Address::times_4, -4)); 7854 7855 movl(raxReg, k); 7856 mull(op1); //tmp4 * eax -> edx:eax 7857 addl(sum, carry); 7858 adcl(rdxReg, 0); 7859 addl(sum, raxReg); 7860 adcl(rdxReg, 0); 7861 movl(carry, rdxReg); 7862 7863 movl(Address(out, offs, Address::times_4, -4), sum); 7864 7865 bind(L_carry); 7866 //return tmp5/carry as carry in rax 7867 movl(rax, carry); 7868 7869 bind(L_done); 7870 pop(tmp5); 7871 pop(tmp4); 7872 pop(tmp3); 7873 pop(tmp2); 7874 pop(tmp1); 7875 } 7876 #endif 7877 7878 /** 7879 * Emits code to update CRC-32 with a byte value according to constants in table 7880 * 7881 * @param [in,out]crc Register containing the crc. 7882 * @param [in]val Register containing the byte to fold into the CRC. 7883 * @param [in]table Register containing the table of crc constants. 7884 * 7885 * uint32_t crc; 7886 * val = crc_table[(val ^ crc) & 0xFF]; 7887 * crc = val ^ (crc >> 8); 7888 * 7889 */ 7890 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 7891 xorl(val, crc); 7892 andl(val, 0xFF); 7893 shrl(crc, 8); // unsigned shift 7894 xorl(crc, Address(table, val, Address::times_4, 0)); 7895 } 7896 7897 /** 7898 * Fold 128-bit data chunk 7899 */ 7900 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { 7901 if (UseAVX > 0) { 7902 vpclmulhdq(xtmp, xK, xcrc); // [123:64] 7903 vpclmulldq(xcrc, xK, xcrc); // [63:0] 7904 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */); 7905 pxor(xcrc, xtmp); 7906 } else { 7907 movdqa(xtmp, xcrc); 7908 pclmulhdq(xtmp, xK); // [123:64] 7909 pclmulldq(xcrc, xK); // [63:0] 7910 pxor(xcrc, xtmp); 7911 movdqu(xtmp, Address(buf, offset)); 7912 pxor(xcrc, xtmp); 7913 } 7914 } 7915 7916 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) { 7917 if (UseAVX > 0) { 7918 vpclmulhdq(xtmp, xK, xcrc); 7919 vpclmulldq(xcrc, xK, xcrc); 7920 pxor(xcrc, xbuf); 7921 pxor(xcrc, xtmp); 7922 } else { 7923 movdqa(xtmp, xcrc); 7924 pclmulhdq(xtmp, xK); 7925 pclmulldq(xcrc, xK); 7926 pxor(xcrc, xbuf); 7927 pxor(xcrc, xtmp); 7928 } 7929 } 7930 7931 /** 7932 * 8-bit folds to compute 32-bit CRC 7933 * 7934 * uint64_t xcrc; 7935 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8); 7936 */ 7937 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) { 7938 movdl(tmp, xcrc); 7939 andl(tmp, 0xFF); 7940 movdl(xtmp, Address(table, tmp, Address::times_4, 0)); 7941 psrldq(xcrc, 1); // unsigned shift one byte 7942 pxor(xcrc, xtmp); 7943 } 7944 7945 /** 7946 * uint32_t crc; 7947 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 7948 */ 7949 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 7950 movl(tmp, crc); 7951 andl(tmp, 0xFF); 7952 shrl(crc, 8); 7953 xorl(crc, Address(table, tmp, Address::times_4, 0)); 7954 } 7955 7956 /** 7957 * @param crc register containing existing CRC (32-bit) 7958 * @param buf register pointing to input byte buffer (byte*) 7959 * @param len register containing number of bytes 7960 * 
@param table register that will contain address of CRC table 7961 * @param tmp scratch register 7962 */ 7963 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) { 7964 assert_different_registers(crc, buf, len, table, tmp, rax); 7965 7966 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 7967 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 7968 7969 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 7970 // context for the registers used, where all instructions below are using 128-bit mode 7971 // On EVEX without VL and BW, these instructions will all be AVX. 7972 lea(table, ExternalAddress(StubRoutines::crc_table_addr())); 7973 notl(crc); // ~crc 7974 cmpl(len, 16); 7975 jcc(Assembler::less, L_tail); 7976 7977 // Align buffer to 16 bytes 7978 movl(tmp, buf); 7979 andl(tmp, 0xF); 7980 jccb(Assembler::zero, L_aligned); 7981 subl(tmp, 16); 7982 addl(len, tmp); 7983 7984 align(4); 7985 BIND(L_align_loop); 7986 movsbl(rax, Address(buf, 0)); // load byte with sign extension 7987 update_byte_crc32(crc, rax, table); 7988 increment(buf); 7989 incrementl(tmp); 7990 jccb(Assembler::less, L_align_loop); 7991 7992 BIND(L_aligned); 7993 movl(tmp, len); // save 7994 shrl(len, 4); 7995 jcc(Assembler::zero, L_tail_restore); 7996 7997 // Fold crc into first bytes of vector 7998 movdqa(xmm1, Address(buf, 0)); 7999 movdl(rax, xmm1); 8000 xorl(crc, rax); 8001 if (VM_Version::supports_sse4_1()) { 8002 pinsrd(xmm1, crc, 0); 8003 } else { 8004 pinsrw(xmm1, crc, 0); 8005 shrl(crc, 16); 8006 pinsrw(xmm1, crc, 1); 8007 } 8008 addptr(buf, 16); 8009 subl(len, 4); // len > 0 8010 jcc(Assembler::less, L_fold_tail); 8011 8012 movdqa(xmm2, Address(buf, 0)); 8013 movdqa(xmm3, Address(buf, 16)); 8014 movdqa(xmm4, Address(buf, 32)); 8015 addptr(buf, 48); 8016 subl(len, 3); 8017 jcc(Assembler::lessEqual, L_fold_512b); 8018 8019 // Fold total 512 bits of polynomial on each iteration, 8020 // 128 bits per each of 4 parallel streams. 8021 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1); 8022 8023 align32(); 8024 BIND(L_fold_512b_loop); 8025 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 8026 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16); 8027 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32); 8028 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48); 8029 addptr(buf, 64); 8030 subl(len, 4); 8031 jcc(Assembler::greater, L_fold_512b_loop); 8032 8033 // Fold 512 bits to 128 bits. 8034 BIND(L_fold_512b); 8035 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 8036 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2); 8037 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3); 8038 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4); 8039 8040 // Fold the rest of 128 bits data chunks 8041 BIND(L_fold_tail); 8042 addl(len, 3); 8043 jccb(Assembler::lessEqual, L_fold_128b); 8044 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 8045 8046 BIND(L_fold_tail_loop); 8047 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 8048 addptr(buf, 16); 8049 decrementl(len); 8050 jccb(Assembler::greater, L_fold_tail_loop); 8051 8052 // Fold 128 bits in xmm1 down into 32 bits in crc register. 
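// Below: two carry-less multiplies against the crc_by128_masks constants
// reduce the 128-bit value, then eight 8-bit table folds (four while the
// value is still in xmm0, four more once it is in the crc register)
// finish the 32-bit CRC.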
8053 BIND(L_fold_128b); 8054 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1); 8055 if (UseAVX > 0) { 8056 vpclmulqdq(xmm2, xmm0, xmm1, 0x1); 8057 vpand(xmm3, xmm0, xmm2, 0 /* vector_len */); 8058 vpclmulqdq(xmm0, xmm0, xmm3, 0x1); 8059 } else { 8060 movdqa(xmm2, xmm0); 8061 pclmulqdq(xmm2, xmm1, 0x1); 8062 movdqa(xmm3, xmm0); 8063 pand(xmm3, xmm2); 8064 pclmulqdq(xmm0, xmm3, 0x1); 8065 } 8066 psrldq(xmm1, 8); 8067 psrldq(xmm2, 4); 8068 pxor(xmm0, xmm1); 8069 pxor(xmm0, xmm2); 8070 8071 // 8 8-bit folds to compute 32-bit CRC. 8072 for (int j = 0; j < 4; j++) { 8073 fold_8bit_crc32(xmm0, table, xmm1, rax); 8074 } 8075 movdl(crc, xmm0); // mov 32 bits to general register 8076 for (int j = 0; j < 4; j++) { 8077 fold_8bit_crc32(crc, table, rax); 8078 } 8079 8080 BIND(L_tail_restore); 8081 movl(len, tmp); // restore 8082 BIND(L_tail); 8083 andl(len, 0xf); 8084 jccb(Assembler::zero, L_exit); 8085 8086 // Fold the rest of bytes 8087 align(4); 8088 BIND(L_tail_loop); 8089 movsbl(rax, Address(buf, 0)); // load byte with sign extension 8090 update_byte_crc32(crc, rax, table); 8091 increment(buf); 8092 decrementl(len); 8093 jccb(Assembler::greater, L_tail_loop); 8094 8095 BIND(L_exit); 8096 notl(crc); // ~crc 8097 } 8098 8099 #ifdef _LP64 8100 // Helper function for AVX 512 CRC32 8101 // Fold 512-bit data chunks 8102 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, 8103 Register pos, int offset) { 8104 evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit); 8105 evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64] 8106 evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0] 8107 evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */); 8108 evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */); 8109 } 8110 8111 // Helper function for AVX 512 CRC32 8112 // Compute CRC32 for < 256B buffers 8113 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos, 8114 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop, 8115 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) { 8116 8117 Label L_less_than_32, L_exact_16_left, L_less_than_16_left; 8118 Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left; 8119 Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2; 8120 8121 // check if there is enough buffer to be able to fold 16B at a time 8122 cmpl(len, 32); 8123 jcc(Assembler::less, L_less_than_32); 8124 8125 // if there is, load the constants 8126 movdqu(xmm10, Address(table, 1 * 16)); //rk1 and rk2 in xmm10 8127 movdl(xmm0, crc); // get the initial crc value 8128 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext 8129 pxor(xmm7, xmm0); 8130 8131 // update the buffer pointer 8132 addl(pos, 16); 8133 // update the counter; subtract 32 instead of 16 to save one instruction in the loop 8134 subl(len, 32); 8135 jmp(L_16B_reduction_loop); 8136 8137 bind(L_less_than_32); 8138 // mov initial crc to the return value. this is necessary for zero-length buffers.
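// Lengths 0..31 land here: the crc is staged in rax first so that a
// zero-length buffer returns it unchanged, and sub-16B inputs are
// assembled in a pre-zeroed 16B stack slot before a single 16B load.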
8139 movl(rax, crc); 8140 testl(len, len); 8141 jcc(Assembler::equal, L_cleanup); 8142 8143 movdl(xmm0, crc); //get the initial crc value 8144 8145 cmpl(len, 16); 8146 jcc(Assembler::equal, L_exact_16_left); 8147 jcc(Assembler::less, L_less_than_16_left); 8148 8149 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext 8150 pxor(xmm7, xmm0); //xor the initial crc value 8151 addl(pos, 16); 8152 subl(len, 16); 8153 movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10 8154 jmp(L_get_last_two_xmms); 8155 8156 bind(L_less_than_16_left); 8157 //use stack space to load data less than 16 bytes, zero - out the 16B in memory first. 8158 pxor(xmm1, xmm1); 8159 movptr(tmp1, rsp); 8160 movdqu(Address(tmp1, 0 * 16), xmm1); 8161 8162 cmpl(len, 4); 8163 jcc(Assembler::less, L_only_less_than_4); 8164 8165 //backup the counter value 8166 movl(tmp2, len); 8167 cmpl(len, 8); 8168 jcc(Assembler::less, L_less_than_8_left); 8169 8170 //load 8 Bytes 8171 movq(rax, Address(buf, pos, Address::times_1, 0 * 16)); 8172 movq(Address(tmp1, 0 * 16), rax); 8173 addptr(tmp1, 8); 8174 subl(len, 8); 8175 addl(pos, 8); 8176 8177 bind(L_less_than_8_left); 8178 cmpl(len, 4); 8179 jcc(Assembler::less, L_less_than_4_left); 8180 8181 //load 4 Bytes 8182 movl(rax, Address(buf, pos, Address::times_1, 0)); 8183 movl(Address(tmp1, 0 * 16), rax); 8184 addptr(tmp1, 4); 8185 subl(len, 4); 8186 addl(pos, 4); 8187 8188 bind(L_less_than_4_left); 8189 cmpl(len, 2); 8190 jcc(Assembler::less, L_less_than_2_left); 8191 8192 // load 2 Bytes 8193 movw(rax, Address(buf, pos, Address::times_1, 0)); 8194 movl(Address(tmp1, 0 * 16), rax); 8195 addptr(tmp1, 2); 8196 subl(len, 2); 8197 addl(pos, 2); 8198 8199 bind(L_less_than_2_left); 8200 cmpl(len, 1); 8201 jcc(Assembler::less, L_zero_left); 8202 8203 // load 1 Byte 8204 movb(rax, Address(buf, pos, Address::times_1, 0)); 8205 movb(Address(tmp1, 0 * 16), rax); 8206 8207 bind(L_zero_left); 8208 movdqu(xmm7, Address(rsp, 0)); 8209 pxor(xmm7, xmm0); //xor the initial crc value 8210 8211 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr())); 8212 movdqu(xmm0, Address(rax, tmp2)); 8213 pshufb(xmm7, xmm0); 8214 jmp(L_128_done); 8215 8216 bind(L_exact_16_left); 8217 movdqu(xmm7, Address(buf, pos, Address::times_1, 0)); 8218 pxor(xmm7, xmm0); //xor the initial crc value 8219 jmp(L_128_done); 8220 8221 bind(L_only_less_than_4); 8222 cmpl(len, 3); 8223 jcc(Assembler::less, L_only_less_than_3); 8224 8225 // load 3 Bytes 8226 movb(rax, Address(buf, pos, Address::times_1, 0)); 8227 movb(Address(tmp1, 0), rax); 8228 8229 movb(rax, Address(buf, pos, Address::times_1, 1)); 8230 movb(Address(tmp1, 1), rax); 8231 8232 movb(rax, Address(buf, pos, Address::times_1, 2)); 8233 movb(Address(tmp1, 2), rax); 8234 8235 movdqu(xmm7, Address(rsp, 0)); 8236 pxor(xmm7, xmm0); //xor the initial crc value 8237 8238 pslldq(xmm7, 0x5); 8239 jmp(L_barrett); 8240 bind(L_only_less_than_3); 8241 cmpl(len, 2); 8242 jcc(Assembler::less, L_only_less_than_2); 8243 8244 // load 2 Bytes 8245 movb(rax, Address(buf, pos, Address::times_1, 0)); 8246 movb(Address(tmp1, 0), rax); 8247 8248 movb(rax, Address(buf, pos, Address::times_1, 1)); 8249 movb(Address(tmp1, 1), rax); 8250 8251 movdqu(xmm7, Address(rsp, 0)); 8252 pxor(xmm7, xmm0); //xor the initial crc value 8253 8254 pslldq(xmm7, 0x6); 8255 jmp(L_barrett); 8256 8257 bind(L_only_less_than_2); 8258 //load 1 Byte 8259 movb(rax, Address(buf, pos, Address::times_1, 0)); 8260 movb(Address(tmp1, 0), rax); 8261 8262 movdqu(xmm7, Address(rsp, 
0)); 8263 pxor(xmm7, xmm0); //xor the initial crc value 8264 8265 pslldq(xmm7, 0x7); 8266 } 8267 8268 /** 8269 * Compute CRC32 using AVX512 instructions 8270 * param crc register containing existing CRC (32-bit) 8271 * param buf register pointing to input byte buffer (byte*) 8272 * param len register containing number of bytes 8273 * param table address of crc or crc32c table 8274 * param tmp1 scratch register 8275 * param tmp2 scratch register 8276 * return rax result register 8277 * 8278 * This routine is identical for crc32c with the exception of the precomputed constant 8279 * table which will be passed as the table argument. The calculation steps are 8280 * the same for both variants. 8281 */ 8282 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) { 8283 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12); 8284 8285 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 8286 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 8287 Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop; 8288 Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop; 8289 Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup; 8290 8291 const Register pos = r12; 8292 push(r12); 8293 subptr(rsp, 16 * 2 + 8); 8294 8295 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 8296 // context for the registers used, where all instructions below are using 128-bit mode 8297 // On EVEX without VL and BW, these instructions will all be AVX. 8298 movl(pos, 0); 8299 8300 // check if smaller than 256B 8301 cmpl(len, 256); 8302 jcc(Assembler::less, L_less_than_256); 8303 8304 // load the initial crc value 8305 movdl(xmm10, crc); 8306 8307 // receive the initial 64B data, xor the initial crc value 8308 evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit); 8309 evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit); 8310 evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit); 8311 evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4 8312 8313 subl(len, 256); 8314 cmpl(len, 256); 8315 jcc(Assembler::less, L_fold_128_B_loop); 8316 8317 evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit); 8318 evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit); 8319 evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2 8320 subl(len, 256); 8321 8322 bind(L_fold_256_B_loop); 8323 addl(pos, 256); 8324 fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64); 8325 fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64); 8326 fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64); 8327 fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64); 8328 8329 subl(len, 256); 8330 jcc(Assembler::greaterEqual, L_fold_256_B_loop); 8331 8332 // Fold 256 into 128 8333 addl(pos, 256); 8334 evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit); 8335 evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit); 8336 vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC 8337 8338 evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit); 8339 evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit); 8340 vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC 8341 8342 evmovdquq(xmm0, xmm7, 
Assembler::AVX_512bit); 8343 evmovdquq(xmm4, xmm8, Assembler::AVX_512bit); 8344 8345 addl(len, 128); 8346 jmp(L_fold_128_B_register); 8347 8348 // at this section of the code, there are 128 * x + y (0 <= y < 128) bytes of buffer. The fold_128_B_loop 8349 // loop will fold 128B at a time until we have 128 + y bytes of buffer 8350 8351 // fold 128B at a time. This section of the code folds 8 xmm registers in parallel 8352 bind(L_fold_128_B_loop); 8353 addl(pos, 128); 8354 fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64); 8355 fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64); 8356 8357 subl(len, 128); 8358 jcc(Assembler::greaterEqual, L_fold_128_B_loop); 8359 8360 addl(pos, 128); 8361 8362 // at this point, the buffer pointer is pointing at the last y bytes of the buffer, where 0 <= y < 128 8363 // the 128B of folded data is in 8 of the xmm registers: xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 8364 bind(L_fold_128_B_register); 8365 evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16 8366 evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0 8367 evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit); 8368 evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit); 8369 // save last that has no multiplicand 8370 vextracti64x2(xmm7, xmm4, 3); 8371 8372 evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit); 8373 evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit); 8374 // Needed later in reduction loop 8375 movdqu(xmm10, Address(table, 1 * 16)); 8376 vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC 8377 vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC 8378 8379 // Swap 1,0,3,2 - 01 00 11 10 8380 evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit); 8381 evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit); 8382 vextracti128(xmm5, xmm8, 1); 8383 evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit); 8384 8385 // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop 8386 // instead of a cmp instruction, we use the negative flag with the jl instruction 8387 addl(len, 128 - 16); 8388 jcc(Assembler::less, L_final_reduction_for_128); 8389 8390 bind(L_16B_reduction_loop); 8391 vpclmulqdq(xmm8, xmm7, xmm10, 0x01); 8392 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 8393 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 8394 movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16)); 8395 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8396 addl(pos, 16); 8397 subl(len, 16); 8398 jcc(Assembler::greaterEqual, L_16B_reduction_loop); 8399 8400 bind(L_final_reduction_for_128); 8401 addl(len, 16); 8402 jcc(Assembler::equal, L_128_done); 8403 8404 bind(L_get_last_two_xmms); 8405 movdqu(xmm2, xmm7); 8406 addl(pos, len); 8407 movdqu(xmm1, Address(buf, pos, Address::times_1, -16)); 8408 subl(pos, len); 8409 8410 // get rid of the extra data that was loaded before 8411 // load the shift constant 8412 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr())); 8413 movdqu(xmm0, Address(rax, len)); 8414 addl(rax, len); 8415 8416 vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8417 //Change mask to 512 8418 vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2); 8419 vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit); 8420 8421 blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit); 8422 vpclmulqdq(xmm8, xmm7, xmm10, 0x01); 8423 vpclmulqdq(xmm7,
xmm7, xmm10, 0x10); 8424 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 8425 vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit); 8426 8427 bind(L_128_done); 8428 // compute crc of a 128-bit value 8429 movdqu(xmm10, Address(table, 3 * 16)); 8430 movdqu(xmm0, xmm7); 8431 8432 // 64b fold 8433 vpclmulqdq(xmm7, xmm7, xmm10, 0x0); 8434 vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit); 8435 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8436 8437 // 32b fold 8438 movdqu(xmm0, xmm7); 8439 vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit); 8440 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 8441 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8442 jmp(L_barrett); 8443 8444 bind(L_less_than_256); 8445 kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup); 8446 8447 //barrett reduction 8448 bind(L_barrett); 8449 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2); 8450 movdqu(xmm1, xmm7); 8451 movdqu(xmm2, xmm7); 8452 movdqu(xmm10, Address(table, 4 * 16)); 8453 8454 pclmulqdq(xmm7, xmm10, 0x0); 8455 pxor(xmm7, xmm2); 8456 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2); 8457 movdqu(xmm2, xmm7); 8458 pclmulqdq(xmm7, xmm10, 0x10); 8459 pxor(xmm7, xmm2); 8460 pxor(xmm7, xmm1); 8461 pextrd(crc, xmm7, 2); 8462 8463 bind(L_cleanup); 8464 addptr(rsp, 16 * 2 + 8); 8465 pop(r12); 8466 } 8467 8468 // S. Gueron / Information Processing Letters 112 (2012) 184 8469 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table. 8470 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0]. 8471 // Output: the 64-bit carry-less product of B * CONST 8472 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n, 8473 Register tmp1, Register tmp2, Register tmp3) { 8474 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 8475 if (n > 0) { 8476 addq(tmp3, n * 256 * 8); 8477 } 8478 // Q1 = TABLEExt[n][B & 0xFF]; 8479 movl(tmp1, in); 8480 andl(tmp1, 0x000000FF); 8481 shll(tmp1, 3); 8482 addq(tmp1, tmp3); 8483 movq(tmp1, Address(tmp1, 0)); 8484 8485 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 8486 movl(tmp2, in); 8487 shrl(tmp2, 8); 8488 andl(tmp2, 0x000000FF); 8489 shll(tmp2, 3); 8490 addq(tmp2, tmp3); 8491 movq(tmp2, Address(tmp2, 0)); 8492 8493 shlq(tmp2, 8); 8494 xorq(tmp1, tmp2); 8495 8496 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 8497 movl(tmp2, in); 8498 shrl(tmp2, 16); 8499 andl(tmp2, 0x000000FF); 8500 shll(tmp2, 3); 8501 addq(tmp2, tmp3); 8502 movq(tmp2, Address(tmp2, 0)); 8503 8504 shlq(tmp2, 16); 8505 xorq(tmp1, tmp2); 8506 8507 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 8508 shrl(in, 24); 8509 andl(in, 0x000000FF); 8510 shll(in, 3); 8511 addq(in, tmp3); 8512 movq(in, Address(in, 0)); 8513 8514 shlq(in, 24); 8515 xorq(in, tmp1); 8516 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 8517 } 8518 8519 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 8520 Register in_out, 8521 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 8522 XMMRegister w_xtmp2, 8523 Register tmp1, 8524 Register n_tmp2, Register n_tmp3) { 8525 if (is_pclmulqdq_supported) { 8526 movdl(w_xtmp1, in_out); // modified blindly 8527 8528 movl(tmp1, const_or_pre_comp_const_index); 8529 movdl(w_xtmp2, tmp1); 8530 pclmulqdq(w_xtmp1, w_xtmp2, 0); 8531 8532 movdq(in_out, w_xtmp1); 8533 } else { 8534 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3); 8535 } 8536 } 8537 8538 // 
Recombination Alternative 2: No bit-reflections 8539 // T1 = (CRC_A * U1) << 1 8540 // T2 = (CRC_B * U2) << 1 8541 // C1 = T1 >> 32 8542 // C2 = T2 >> 32 8543 // T1 = T1 & 0xFFFFFFFF 8544 // T2 = T2 & 0xFFFFFFFF 8545 // T1 = CRC32(0, T1) 8546 // T2 = CRC32(0, T2) 8547 // C1 = C1 ^ T1 8548 // C2 = C2 ^ T2 8549 // CRC = C1 ^ C2 ^ CRC_C 8550 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 8551 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8552 Register tmp1, Register tmp2, 8553 Register n_tmp3) { 8554 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8555 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8556 shlq(in_out, 1); 8557 movl(tmp1, in_out); 8558 shrq(in_out, 32); 8559 xorl(tmp2, tmp2); 8560 crc32(tmp2, tmp1, 4); 8561 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here 8562 shlq(in1, 1); 8563 movl(tmp1, in1); 8564 shrq(in1, 32); 8565 xorl(tmp2, tmp2); 8566 crc32(tmp2, tmp1, 4); 8567 xorl(in1, tmp2); 8568 xorl(in_out, in1); 8569 xorl(in_out, in2); 8570 } 8571 8572 // Set N to predefined value 8573 // Subtract from a length of a buffer 8574 // execute in a loop: 8575 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0 8576 // for i = 1 to N do 8577 // CRC_A = CRC32(CRC_A, A[i]) 8578 // CRC_B = CRC32(CRC_B, B[i]) 8579 // CRC_C = CRC32(CRC_C, C[i]) 8580 // end for 8581 // Recombine 8582 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 8583 Register in_out1, Register in_out2, Register in_out3, 8584 Register tmp1, Register tmp2, Register tmp3, 8585 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8586 Register tmp4, Register tmp5, 8587 Register n_tmp6) { 8588 Label L_processPartitions; 8589 Label L_processPartition; 8590 Label L_exit; 8591 8592 bind(L_processPartitions); 8593 cmpl(in_out1, 3 * size); 8594 jcc(Assembler::less, L_exit); 8595 xorl(tmp1, tmp1); 8596 xorl(tmp2, tmp2); 8597 movq(tmp3, in_out2); 8598 addq(tmp3, size); 8599 8600 bind(L_processPartition); 8601 crc32(in_out3, Address(in_out2, 0), 8); 8602 crc32(tmp1, Address(in_out2, size), 8); 8603 crc32(tmp2, Address(in_out2, size * 2), 8); 8604 addq(in_out2, 8); 8605 cmpq(in_out2, tmp3); 8606 jcc(Assembler::less, L_processPartition); 8607 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 8608 w_xtmp1, w_xtmp2, w_xtmp3, 8609 tmp4, tmp5, 8610 n_tmp6); 8611 addq(in_out2, 2 * size); 8612 subl(in_out1, 3 * size); 8613 jmp(L_processPartitions); 8614 8615 bind(L_exit); 8616 } 8617 #else 8618 void MacroAssembler::crc32c_ipl_alg4(Register in_out, uint32_t n, 8619 Register tmp1, Register tmp2, Register tmp3, 8620 XMMRegister xtmp1, XMMRegister xtmp2) { 8621 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 8622 if (n > 0) { 8623 addl(tmp3, n * 256 * 8); 8624 } 8625 // Q1 = TABLEExt[n][B & 0xFF]; 8626 movl(tmp1, in_out); 8627 andl(tmp1, 0x000000FF); 8628 shll(tmp1, 3); 8629 addl(tmp1, tmp3); 8630 movq(xtmp1, Address(tmp1, 0)); 8631 8632 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 8633 movl(tmp2, in_out); 8634 shrl(tmp2, 8); 8635 andl(tmp2, 0x000000FF); 8636 shll(tmp2, 3); 8637 addl(tmp2, tmp3); 8638 movq(xtmp2, 
Address(tmp2, 0)); 8639 8640 psllq(xtmp2, 8); 8641 pxor(xtmp1, xtmp2); 8642 8643 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 8644 movl(tmp2, in_out); 8645 shrl(tmp2, 16); 8646 andl(tmp2, 0x000000FF); 8647 shll(tmp2, 3); 8648 addl(tmp2, tmp3); 8649 movq(xtmp2, Address(tmp2, 0)); 8650 8651 psllq(xtmp2, 16); 8652 pxor(xtmp1, xtmp2); 8653 8654 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 8655 shrl(in_out, 24); 8656 andl(in_out, 0x000000FF); 8657 shll(in_out, 3); 8658 addl(in_out, tmp3); 8659 movq(xtmp2, Address(in_out, 0)); 8660 8661 psllq(xtmp2, 24); 8662 pxor(xtmp1, xtmp2); // Result in CXMM 8663 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 8664 } 8665 8666 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 8667 Register in_out, 8668 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 8669 XMMRegister w_xtmp2, 8670 Register tmp1, 8671 Register n_tmp2, Register n_tmp3) { 8672 if (is_pclmulqdq_supported) { 8673 movdl(w_xtmp1, in_out); 8674 8675 movl(tmp1, const_or_pre_comp_const_index); 8676 movdl(w_xtmp2, tmp1); 8677 pclmulqdq(w_xtmp1, w_xtmp2, 0); 8678 // Keep result in XMM since GPR is 32 bit in length 8679 } else { 8680 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3, w_xtmp1, w_xtmp2); 8681 } 8682 } 8683 8684 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 8685 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8686 Register tmp1, Register tmp2, 8687 Register n_tmp3) { 8688 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8689 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8690 8691 psllq(w_xtmp1, 1); 8692 movdl(tmp1, w_xtmp1); 8693 psrlq(w_xtmp1, 32); 8694 movdl(in_out, w_xtmp1); 8695 8696 xorl(tmp2, tmp2); 8697 crc32(tmp2, tmp1, 4); 8698 xorl(in_out, tmp2); 8699 8700 psllq(w_xtmp2, 1); 8701 movdl(tmp1, w_xtmp2); 8702 psrlq(w_xtmp2, 32); 8703 movdl(in1, w_xtmp2); 8704 8705 xorl(tmp2, tmp2); 8706 crc32(tmp2, tmp1, 4); 8707 xorl(in1, tmp2); 8708 xorl(in_out, in1); 8709 xorl(in_out, in2); 8710 } 8711 8712 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 8713 Register in_out1, Register in_out2, Register in_out3, 8714 Register tmp1, Register tmp2, Register tmp3, 8715 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8716 Register tmp4, Register tmp5, 8717 Register n_tmp6) { 8718 Label L_processPartitions; 8719 Label L_processPartition; 8720 Label L_exit; 8721 8722 bind(L_processPartitions); 8723 cmpl(in_out1, 3 * size); 8724 jcc(Assembler::less, L_exit); 8725 xorl(tmp1, tmp1); 8726 xorl(tmp2, tmp2); 8727 movl(tmp3, in_out2); 8728 addl(tmp3, size); 8729 8730 bind(L_processPartition); 8731 crc32(in_out3, Address(in_out2, 0), 4); 8732 crc32(tmp1, Address(in_out2, size), 4); 8733 crc32(tmp2, Address(in_out2, size*2), 4); 8734 crc32(in_out3, Address(in_out2, 0+4), 4); 8735 crc32(tmp1, Address(in_out2, size+4), 4); 8736 crc32(tmp2, Address(in_out2, size*2+4), 4); 8737 addl(in_out2, 8); 8738 cmpl(in_out2, tmp3); 8739 jcc(Assembler::less, L_processPartition); 8740 8741 push(tmp3); 8742 push(in_out1); 8743 push(in_out2); 8744 tmp4 = tmp3; 8745 tmp5 = in_out1; 8746 n_tmp6 = in_out2; 8747 8748 
crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 8749 w_xtmp1, w_xtmp2, w_xtmp3, 8750 tmp4, tmp5, 8751 n_tmp6); 8752 8753 pop(in_out2); 8754 pop(in_out1); 8755 pop(tmp3); 8756 8757 addl(in_out2, 2 * size); 8758 subl(in_out1, 3 * size); 8759 jmp(L_processPartitions); 8760 8761 bind(L_exit); 8762 } 8763 #endif //LP64 8764 8765 #ifdef _LP64 8766 // Algorithm 2: Pipelined usage of the CRC32 instruction. 8767 // Input: A buffer I of L bytes. 8768 // Output: the CRC32C value of the buffer. 8769 // Notations: 8770 // Write L = 24N + r, with N = floor (L/24). 8771 // r = L mod 24 (0 <= r < 24). 8772 // Consider I as the concatenation of A|B|C|R, where A, B, C each consist of 8773 // N quadwords (8N bytes), and R consists of r bytes. 8774 // A[j] = I [8j+7:8j], j = 0, 1, ..., N-1 8775 // B[j] = I [8N + 8j+7:8N + 8j], j = 0, 1, ..., N-1 8776 // C[j] = I [16N + 8j+7:16N + 8j], j = 0, 1, ..., N-1 8777 // if r > 0, R[j] = I [24N + j], j = 0, 1, ..., r-1 8778 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 8779 Register tmp1, Register tmp2, Register tmp3, 8780 Register tmp4, Register tmp5, Register tmp6, 8781 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8782 bool is_pclmulqdq_supported) { 8783 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 8784 Label L_wordByWord; 8785 Label L_byteByByteProlog; 8786 Label L_byteByByte; 8787 Label L_exit; 8788 8789 if (is_pclmulqdq_supported) { 8790 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 8791 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr+1); 8792 8793 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 8794 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 8795 8796 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 8797 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 8798 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\""); 8799 } else { 8800 const_or_pre_comp_const_index[0] = 1; 8801 const_or_pre_comp_const_index[1] = 0; 8802 8803 const_or_pre_comp_const_index[2] = 3; 8804 const_or_pre_comp_const_index[3] = 2; 8805 8806 const_or_pre_comp_const_index[4] = 5; 8807 const_or_pre_comp_const_index[5] = 4; 8808 } 8809 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 8810 in2, in1, in_out, 8811 tmp1, tmp2, tmp3, 8812 w_xtmp1, w_xtmp2, w_xtmp3, 8813 tmp4, tmp5, 8814 tmp6); 8815 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 8816 in2, in1, in_out, 8817 tmp1, tmp2, tmp3, 8818 w_xtmp1, w_xtmp2, w_xtmp3, 8819 tmp4, tmp5, 8820 tmp6); 8821 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 8822 in2, in1, in_out, 8823 tmp1, tmp2, tmp3, 8824 w_xtmp1, w_xtmp2, w_xtmp3, 8825 tmp4, tmp5, 8826 tmp6); 8827 movl(tmp1, in2); 8828 andl(tmp1, 0x00000007); 8829 negl(tmp1); 8830 addl(tmp1, in2); 8831 addq(tmp1, in1); 8832 8833 cmpq(in1, tmp1); 8834 jccb(Assembler::greaterEqual, L_byteByByteProlog); 8835 align(16); 8836 BIND(L_wordByWord); 8837 crc32(in_out, Address(in1, 0), 8); 8838 addq(in1, 8); 8839 cmpq(in1, tmp1); 8840
jcc(Assembler::less, L_wordByWord); 8841 8842 BIND(L_byteByByteProlog); 8843 andl(in2, 0x00000007); 8844 movl(tmp2, 1); 8845 8846 cmpl(tmp2, in2); 8847 jccb(Assembler::greater, L_exit); 8848 BIND(L_byteByByte); 8849 crc32(in_out, Address(in1, 0), 1); 8850 incq(in1); 8851 incl(tmp2); 8852 cmpl(tmp2, in2); 8853 jcc(Assembler::lessEqual, L_byteByByte); 8854 8855 BIND(L_exit); 8856 } 8857 #else 8858 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 8859 Register tmp1, Register tmp2, Register tmp3, 8860 Register tmp4, Register tmp5, Register tmp6, 8861 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8862 bool is_pclmulqdq_supported) { 8863 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 8864 Label L_wordByWord; 8865 Label L_byteByByteProlog; 8866 Label L_byteByByte; 8867 Label L_exit; 8868 8869 if (is_pclmulqdq_supported) { 8870 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 8871 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1); 8872 8873 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 8874 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 8875 8876 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 8877 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 8878 } else { 8879 const_or_pre_comp_const_index[0] = 1; 8880 const_or_pre_comp_const_index[1] = 0; 8881 8882 const_or_pre_comp_const_index[2] = 3; 8883 const_or_pre_comp_const_index[3] = 2; 8884 8885 const_or_pre_comp_const_index[4] = 5; 8886 const_or_pre_comp_const_index[5] = 4; 8887 } 8888 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 8889 in2, in1, in_out, 8890 tmp1, tmp2, tmp3, 8891 w_xtmp1, w_xtmp2, w_xtmp3, 8892 tmp4, tmp5, 8893 tmp6); 8894 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 8895 in2, in1, in_out, 8896 tmp1, tmp2, tmp3, 8897 w_xtmp1, w_xtmp2, w_xtmp3, 8898 tmp4, tmp5, 8899 tmp6); 8900 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 8901 in2, in1, in_out, 8902 tmp1, tmp2, tmp3, 8903 w_xtmp1, w_xtmp2, w_xtmp3, 8904 tmp4, tmp5, 8905 tmp6); 8906 movl(tmp1, in2); 8907 andl(tmp1, 0x00000007); 8908 negl(tmp1); 8909 addl(tmp1, in2); 8910 addl(tmp1, in1); 8911 8912 BIND(L_wordByWord); 8913 cmpl(in1, tmp1); 8914 jcc(Assembler::greaterEqual, L_byteByByteProlog); 8915 crc32(in_out, Address(in1,0), 4); 8916 addl(in1, 4); 8917 jmp(L_wordByWord); 8918 8919 BIND(L_byteByByteProlog); 8920 andl(in2, 0x00000007); 8921 movl(tmp2, 1); 8922 8923 BIND(L_byteByByte); 8924 cmpl(tmp2, in2); 8925 jccb(Assembler::greater, L_exit); 8926 movb(tmp1, Address(in1, 0)); 8927 crc32(in_out, tmp1, 1); 8928 incl(in1); 8929 incl(tmp2); 8930 jmp(L_byteByByte); 8931 8932 BIND(L_exit); 8933 } 8934 #endif // LP64 8935 #undef BIND 8936 #undef BLOCK_COMMENT 8937 8938 // Compress char[] array to byte[]. 8939 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 8940 // Return the array length if every element in array can be encoded, 8941 // otherwise, the index of first non-latin1 (> 0xff) character. 
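// The Java reference implementation that this intrinsic must agree with: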
8942 // @IntrinsicCandidate 8943 // public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) { 8944 // for (int i = 0; i < len; i++) { 8945 // char c = src[srcOff]; 8946 // if (c > 0xff) { 8947 // return i; // return index of non-latin1 char 8948 // } 8949 // dst[dstOff] = (byte)c; 8950 // srcOff++; 8951 // dstOff++; 8952 // } 8953 // return len; 8954 // } 8955 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 8956 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 8957 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 8958 Register tmp5, Register result, KRegister mask1, KRegister mask2) { 8959 Label copy_chars_loop, done, reset_sp, copy_tail; 8960 8961 // rsi: src 8962 // rdi: dst 8963 // rdx: len 8964 // rcx: tmp5 8965 // rax: result 8966 8967 // rsi holds start addr of source char[] to be compressed 8968 // rdi holds start addr of destination byte[] 8969 // rdx holds length 8970 8971 assert(len != result, ""); 8972 8973 // save length for return 8974 movl(result, len); 8975 8976 if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512 8977 VM_Version::supports_avx512vlbw() && 8978 VM_Version::supports_bmi2()) { 8979 8980 Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail; 8981 8982 // alignment 8983 Label post_alignment; 8984 8985 // if the length of the string is less than 32, handle it the old-fashioned way 8986 testl(len, -32); 8987 jcc(Assembler::zero, below_threshold); 8988 8989 // First check whether a character is compressible (<= 0xFF). 8990 // Create mask to test for Unicode chars inside zmm vector 8991 movl(tmp5, 0x00FF); 8992 evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit); 8993 8994 testl(len, -64); 8995 jccb(Assembler::zero, post_alignment); 8996 8997 movl(tmp5, dst); 8998 andl(tmp5, (32 - 1)); 8999 negl(tmp5); 9000 andl(tmp5, (32 - 1)); 9001 9002 // bail out when there is nothing to be done 9003 testl(tmp5, 0xFFFFFFFF); 9004 jccb(Assembler::zero, post_alignment); 9005 9006 // ~(~0 << len), where len is the # of remaining elements to process 9007 movl(len, 0xFFFFFFFF); 9008 shlxl(len, len, tmp5); 9009 notl(len); 9010 kmovdl(mask2, len); 9011 movl(len, result); 9012 9013 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 9014 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 9015 ktestd(mask1, mask2); 9016 jcc(Assembler::carryClear, copy_tail); 9017 9018 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 9019 9020 addptr(src, tmp5); 9021 addptr(src, tmp5); 9022 addptr(dst, tmp5); 9023 subl(len, tmp5); 9024 9025 bind(post_alignment); 9026 // end of alignment 9027 9028 movl(tmp5, len); 9029 andl(tmp5, (32 - 1)); // tail count (in chars) 9030 andl(len, ~(32 - 1)); // vector count (in chars) 9031 jccb(Assembler::zero, copy_loop_tail); 9032 9033 lea(src, Address(src, len, Address::times_2)); 9034 lea(dst, Address(dst, len, Address::times_1)); 9035 negptr(len); 9036 9037 bind(copy_32_loop); 9038 evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit); 9039 evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit); 9040 kortestdl(mask1, mask1); 9041 jccb(Assembler::carryClear, reset_for_copy_tail); 9042 9043 // All elements in the current chunk are valid candidates for 9044 // compression. Write the truncated byte elements to memory.
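// (A scalar sketch of the store below, illustrative only and not emitted
// code: for each of the 32 word lanes i, dst[i] = (byte)chunk[i]; evpmovwb
// performs exactly this word-to-byte truncation in a single instruction.)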
9045 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit); 9046 addptr(len, 32); 9047 jccb(Assembler::notZero, copy_32_loop); 9048 9049 bind(copy_loop_tail); 9050 // bail out when there is nothing to be done 9051 testl(tmp5, 0xFFFFFFFF); 9052 jcc(Assembler::zero, done); 9053 9054 movl(len, tmp5); 9055 9056 // ~(~0 << len), where len is the # of remaining elements to process 9057 movl(tmp5, 0xFFFFFFFF); 9058 shlxl(tmp5, tmp5, len); 9059 notl(tmp5); 9060 9061 kmovdl(mask2, tmp5); 9062 9063 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 9064 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 9065 ktestd(mask1, mask2); 9066 jcc(Assembler::carryClear, copy_tail); 9067 9068 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 9069 jmp(done); 9070 9071 bind(reset_for_copy_tail); 9072 lea(src, Address(src, tmp5, Address::times_2)); 9073 lea(dst, Address(dst, tmp5, Address::times_1)); 9074 subptr(len, tmp5); 9075 jmp(copy_chars_loop); 9076 9077 bind(below_threshold); 9078 } 9079 9080 if (UseSSE42Intrinsics) { 9081 Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail; 9082 9083 // vectored compression 9084 testl(len, 0xfffffff8); 9085 jcc(Assembler::zero, copy_tail); 9086 9087 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors 9088 movdl(tmp1Reg, tmp5); 9089 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 9090 9091 andl(len, 0xfffffff0); 9092 jccb(Assembler::zero, copy_16); 9093 9094 // compress 16 chars per iter 9095 pxor(tmp4Reg, tmp4Reg); 9096 9097 lea(src, Address(src, len, Address::times_2)); 9098 lea(dst, Address(dst, len, Address::times_1)); 9099 negptr(len); 9100 9101 bind(copy_32_loop); 9102 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters 9103 por(tmp4Reg, tmp2Reg); 9104 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters 9105 por(tmp4Reg, tmp3Reg); 9106 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector 9107 jccb(Assembler::notZero, reset_for_copy_tail); 9108 packuswb(tmp2Reg, tmp3Reg); // only ASCII chars; compress each to 1 byte 9109 movdqu(Address(dst, len, Address::times_1), tmp2Reg); 9110 addptr(len, 16); 9111 jccb(Assembler::notZero, copy_32_loop); 9112 9113 // compress next vector of 8 chars (if any) 9114 bind(copy_16); 9115 // len = 0 9116 testl(result, 0x00000008); // check if there's a block of 8 chars to compress 9117 jccb(Assembler::zero, copy_tail_sse); 9118 9119 pxor(tmp3Reg, tmp3Reg); 9120 9121 movdqu(tmp2Reg, Address(src, 0)); 9122 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector 9123 jccb(Assembler::notZero, reset_for_copy_tail); 9124 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte 9125 movq(Address(dst, 0), tmp2Reg); 9126 addptr(src, 16); 9127 addptr(dst, 8); 9128 jmpb(copy_tail_sse); 9129 9130 bind(reset_for_copy_tail); 9131 movl(tmp5, result); 9132 andl(tmp5, 0x0000000f); 9133 lea(src, Address(src, tmp5, Address::times_2)); 9134 lea(dst, Address(dst, tmp5, Address::times_1)); 9135 subptr(len, tmp5); 9136 jmpb(copy_chars_loop); 9137 9138 bind(copy_tail_sse); 9139 movl(len, result); 9140 andl(len, 0x00000007); // tail count (in chars) 9141 } 9142 // compress 1 char per iter 9143 bind(copy_tail); 9144 testl(len, len); 9145 jccb(Assembler::zero, done); 9146 lea(src, Address(src, len, Address::times_2)); 9147 lea(dst, Address(dst, len, Address::times_1)); 9148 negptr(len); 9149 9150 
bind(copy_chars_loop); 9151 load_unsigned_short(tmp5, Address(src, len, Address::times_2)); 9152 testl(tmp5, 0xff00); // check if Unicode char 9153 jccb(Assembler::notZero, reset_sp); 9154 movb(Address(dst, len, Address::times_1), tmp5); // ASCII char; compress to 1 byte 9155 increment(len); 9156 jccb(Assembler::notZero, copy_chars_loop); 9157 9158 // add len then return (len will be zero if compress succeeded, otherwise negative) 9159 bind(reset_sp); 9160 addl(result, len); 9161 9162 bind(done); 9163 } 9164 9165 // Inflate byte[] array to char[]. 9166 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java 9167 // @IntrinsicCandidate 9168 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) { 9169 // for (int i = 0; i < len; i++) { 9170 // dst[dstOff++] = (char)(src[srcOff++] & 0xff); 9171 // } 9172 // } 9173 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 9174 XMMRegister tmp1, Register tmp2, KRegister mask) { 9175 Label copy_chars_loop, done, below_threshold, avx3_threshold; 9176 // rsi: src 9177 // rdi: dst 9178 // rdx: len 9179 // rcx: tmp2 9180 9181 // rsi holds start addr of source byte[] to be inflated 9182 // rdi holds start addr of destination char[] 9183 // rdx holds length 9184 assert_different_registers(src, dst, len, tmp2); 9185 movl(tmp2, len); 9186 if ((UseAVX > 2) && // AVX512 9187 VM_Version::supports_avx512vlbw() && 9188 VM_Version::supports_bmi2()) { 9189 9190 Label copy_32_loop, copy_tail; 9191 Register tmp3_aliased = len; 9192 9193 // if length of the string is less than 16, handle it in an old fashioned way 9194 testl(len, -16); 9195 jcc(Assembler::zero, below_threshold); 9196 9197 testl(len, -1 * AVX3Threshold); 9198 jcc(Assembler::zero, avx3_threshold); 9199 9200 // In order to use only one arithmetic operation for the main loop we use 9201 // this pre-calculation 9202 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop 9203 andl(len, -32); // vector count 9204 jccb(Assembler::zero, copy_tail); 9205 9206 lea(src, Address(src, len, Address::times_1)); 9207 lea(dst, Address(dst, len, Address::times_2)); 9208 negptr(len); 9209 9210 9211 // inflate 32 chars per iter 9212 bind(copy_32_loop); 9213 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit); 9214 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit); 9215 addptr(len, 32); 9216 jcc(Assembler::notZero, copy_32_loop); 9217 9218 bind(copy_tail); 9219 // bail out when there is nothing to be done 9220 testl(tmp2, -1); // we don't destroy the contents of tmp2 here 9221 jcc(Assembler::zero, done); 9222 9223 // ~(~0 << length), where length is the # of remaining elements to process 9224 movl(tmp3_aliased, -1); 9225 shlxl(tmp3_aliased, tmp3_aliased, tmp2); 9226 notl(tmp3_aliased); 9227 kmovdl(mask, tmp3_aliased); 9228 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit); 9229 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit); 9230 9231 jmp(done); 9232 bind(avx3_threshold); 9233 } 9234 if (UseSSE42Intrinsics) { 9235 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail; 9236 9237 if (UseAVX > 1) { 9238 andl(tmp2, (16 - 1)); 9239 andl(len, -16); 9240 jccb(Assembler::zero, copy_new_tail); 9241 } else { 9242 andl(tmp2, 0x00000007); // tail count (in chars) 9243 andl(len, 0xfffffff8); // vector count (in chars) 9244 jccb(Assembler::zero, copy_tail); 9245 } 9246 9247 // vectored inflation 9248 lea(src, Address(src, len, 
Address::times_1)); 9249 lea(dst, Address(dst, len, Address::times_2)); 9250 negptr(len); 9251 9252 if (UseAVX > 1) { 9253 bind(copy_16_loop); 9254 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit); 9255 vmovdqu(Address(dst, len, Address::times_2), tmp1); 9256 addptr(len, 16); 9257 jcc(Assembler::notZero, copy_16_loop); 9258 9259 bind(below_threshold); 9260 bind(copy_new_tail); 9261 movl(len, tmp2); 9262 andl(tmp2, 0x00000007); 9263 andl(len, 0xFFFFFFF8); 9264 jccb(Assembler::zero, copy_tail); 9265 9266 pmovzxbw(tmp1, Address(src, 0)); 9267 movdqu(Address(dst, 0), tmp1); 9268 addptr(src, 8); 9269 addptr(dst, 2 * 8); 9270 9271 jmp(copy_tail, true); 9272 } 9273 9274 // inflate 8 chars per iter 9275 bind(copy_8_loop); 9276 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words 9277 movdqu(Address(dst, len, Address::times_2), tmp1); 9278 addptr(len, 8); 9279 jcc(Assembler::notZero, copy_8_loop); 9280 9281 bind(copy_tail); 9282 movl(len, tmp2); 9283 9284 cmpl(len, 4); 9285 jccb(Assembler::less, copy_bytes); 9286 9287 movdl(tmp1, Address(src, 0)); // load 4 byte chars 9288 pmovzxbw(tmp1, tmp1); 9289 movq(Address(dst, 0), tmp1); 9290 subptr(len, 4); 9291 addptr(src, 4); 9292 addptr(dst, 8); 9293 9294 bind(copy_bytes); 9295 } else { 9296 bind(below_threshold); 9297 } 9298 9299 testl(len, len); 9300 jccb(Assembler::zero, done); 9301 lea(src, Address(src, len, Address::times_1)); 9302 lea(dst, Address(dst, len, Address::times_2)); 9303 negptr(len); 9304 9305 // inflate 1 char per iter 9306 bind(copy_chars_loop); 9307 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char 9308 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word 9309 increment(len); 9310 jcc(Assembler::notZero, copy_chars_loop); 9311 9312 bind(done); 9313 } 9314 9315 9316 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) { 9317 switch(type) { 9318 case T_BYTE: 9319 case T_BOOLEAN: 9320 evmovdqub(dst, kmask, src, merge, vector_len); 9321 break; 9322 case T_CHAR: 9323 case T_SHORT: 9324 evmovdquw(dst, kmask, src, merge, vector_len); 9325 break; 9326 case T_INT: 9327 case T_FLOAT: 9328 evmovdqul(dst, kmask, src, merge, vector_len); 9329 break; 9330 case T_LONG: 9331 case T_DOUBLE: 9332 evmovdquq(dst, kmask, src, merge, vector_len); 9333 break; 9334 default: 9335 fatal("Unexpected type argument %s", type2name(type)); 9336 break; 9337 } 9338 } 9339 9340 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) { 9341 switch(type) { 9342 case T_BYTE: 9343 case T_BOOLEAN: 9344 evmovdqub(dst, kmask, src, merge, vector_len); 9345 break; 9346 case T_CHAR: 9347 case T_SHORT: 9348 evmovdquw(dst, kmask, src, merge, vector_len); 9349 break; 9350 case T_INT: 9351 case T_FLOAT: 9352 evmovdqul(dst, kmask, src, merge, vector_len); 9353 break; 9354 case T_LONG: 9355 case T_DOUBLE: 9356 evmovdquq(dst, kmask, src, merge, vector_len); 9357 break; 9358 default: 9359 fatal("Unexpected type argument %s", type2name(type)); 9360 break; 9361 } 9362 } 9363 9364 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) { 9365 switch(masklen) { 9366 case 2: 9367 knotbl(dst, src); 9368 movl(rtmp, 3); 9369 kmovbl(ktmp, rtmp); 9370 kandbl(dst, ktmp, dst); 9371 break; 9372 case 4: 9373 knotbl(dst, src); 9374 movl(rtmp, 15); 9375 kmovbl(ktmp, rtmp); 9376 kandbl(dst, ktmp, dst); 9377 break; 9378 case 
8: 9379 knotbl(dst, src); 9380 break; 9381 case 16: 9382 knotwl(dst, src); 9383 break; 9384 case 32: 9385 knotdl(dst, src); 9386 break; 9387 case 64: 9388 knotql(dst, src); 9389 break; 9390 default: 9391 fatal("Unexpected vector length %d", masklen); 9392 break; 9393 } 9394 } 9395 9396 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9397 switch(type) { 9398 case T_BOOLEAN: 9399 case T_BYTE: 9400 kandbl(dst, src1, src2); 9401 break; 9402 case T_CHAR: 9403 case T_SHORT: 9404 kandwl(dst, src1, src2); 9405 break; 9406 case T_INT: 9407 case T_FLOAT: 9408 kanddl(dst, src1, src2); 9409 break; 9410 case T_LONG: 9411 case T_DOUBLE: 9412 kandql(dst, src1, src2); 9413 break; 9414 default: 9415 fatal("Unexpected type argument %s", type2name(type)); 9416 break; 9417 } 9418 } 9419 9420 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9421 switch(type) { 9422 case T_BOOLEAN: 9423 case T_BYTE: 9424 korbl(dst, src1, src2); 9425 break; 9426 case T_CHAR: 9427 case T_SHORT: 9428 korwl(dst, src1, src2); 9429 break; 9430 case T_INT: 9431 case T_FLOAT: 9432 kordl(dst, src1, src2); 9433 break; 9434 case T_LONG: 9435 case T_DOUBLE: 9436 korql(dst, src1, src2); 9437 break; 9438 default: 9439 fatal("Unexpected type argument %s", type2name(type)); 9440 break; 9441 } 9442 } 9443 9444 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9445 switch(type) { 9446 case T_BOOLEAN: 9447 case T_BYTE: 9448 kxorbl(dst, src1, src2); 9449 break; 9450 case T_CHAR: 9451 case T_SHORT: 9452 kxorwl(dst, src1, src2); 9453 break; 9454 case T_INT: 9455 case T_FLOAT: 9456 kxordl(dst, src1, src2); 9457 break; 9458 case T_LONG: 9459 case T_DOUBLE: 9460 kxorql(dst, src1, src2); 9461 break; 9462 default: 9463 fatal("Unexpected type argument %s", type2name(type)); 9464 break; 9465 } 9466 } 9467 9468 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9469 switch(type) { 9470 case T_BOOLEAN: 9471 case T_BYTE: 9472 evpermb(dst, mask, nds, src, merge, vector_len); break; 9473 case T_CHAR: 9474 case T_SHORT: 9475 evpermw(dst, mask, nds, src, merge, vector_len); break; 9476 case T_INT: 9477 case T_FLOAT: 9478 evpermd(dst, mask, nds, src, merge, vector_len); break; 9479 case T_LONG: 9480 case T_DOUBLE: 9481 evpermq(dst, mask, nds, src, merge, vector_len); break; 9482 default: 9483 fatal("Unexpected type argument %s", type2name(type)); break; 9484 } 9485 } 9486 9487 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9488 switch(type) { 9489 case T_BOOLEAN: 9490 case T_BYTE: 9491 evpermb(dst, mask, nds, src, merge, vector_len); break; 9492 case T_CHAR: 9493 case T_SHORT: 9494 evpermw(dst, mask, nds, src, merge, vector_len); break; 9495 case T_INT: 9496 case T_FLOAT: 9497 evpermd(dst, mask, nds, src, merge, vector_len); break; 9498 case T_LONG: 9499 case T_DOUBLE: 9500 evpermq(dst, mask, nds, src, merge, vector_len); break; 9501 default: 9502 fatal("Unexpected type argument %s", type2name(type)); break; 9503 } 9504 } 9505 9506 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9507 switch(type) { 9508 case T_BYTE: 9509 evpminsb(dst, mask, nds, src, merge, vector_len); break; 9510 case T_SHORT: 9511 evpminsw(dst, mask, nds, src, merge, vector_len); break; 9512 case T_INT: 
9513 evpminsd(dst, mask, nds, src, merge, vector_len); break; 9514 case T_LONG: 9515 evpminsq(dst, mask, nds, src, merge, vector_len); break; 9516 default: 9517 fatal("Unexpected type argument %s", type2name(type)); break; 9518 } 9519 } 9520 9521 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9522 switch(type) { 9523 case T_BYTE: 9524 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 9525 case T_SHORT: 9526 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 9527 case T_INT: 9528 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 9529 case T_LONG: 9530 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 9531 default: 9532 fatal("Unexpected type argument %s", type2name(type)); break; 9533 } 9534 } 9535 9536 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9537 switch(type) { 9538 case T_BYTE: 9539 evpminsb(dst, mask, nds, src, merge, vector_len); break; 9540 case T_SHORT: 9541 evpminsw(dst, mask, nds, src, merge, vector_len); break; 9542 case T_INT: 9543 evpminsd(dst, mask, nds, src, merge, vector_len); break; 9544 case T_LONG: 9545 evpminsq(dst, mask, nds, src, merge, vector_len); break; 9546 default: 9547 fatal("Unexpected type argument %s", type2name(type)); break; 9548 } 9549 } 9550 9551 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9552 switch(type) { 9553 case T_BYTE: 9554 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 9555 case T_SHORT: 9556 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 9557 case T_INT: 9558 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 9559 case T_LONG: 9560 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 9561 default: 9562 fatal("Unexpected type argument %s", type2name(type)); break; 9563 } 9564 } 9565 9566 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9567 switch(type) { 9568 case T_INT: 9569 evpxord(dst, mask, nds, src, merge, vector_len); break; 9570 case T_LONG: 9571 evpxorq(dst, mask, nds, src, merge, vector_len); break; 9572 default: 9573 fatal("Unexpected type argument %s", type2name(type)); break; 9574 } 9575 } 9576 9577 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9578 switch(type) { 9579 case T_INT: 9580 evpxord(dst, mask, nds, src, merge, vector_len); break; 9581 case T_LONG: 9582 evpxorq(dst, mask, nds, src, merge, vector_len); break; 9583 default: 9584 fatal("Unexpected type argument %s", type2name(type)); break; 9585 } 9586 } 9587 9588 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9589 switch(type) { 9590 case T_INT: 9591 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 9592 case T_LONG: 9593 evporq(dst, mask, nds, src, merge, vector_len); break; 9594 default: 9595 fatal("Unexpected type argument %s", type2name(type)); break; 9596 } 9597 } 9598 9599 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9600 switch(type) { 9601 case T_INT: 9602 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 9603 case T_LONG: 9604 evporq(dst, 
mask, nds, src, merge, vector_len); break; 9605 default: 9606 fatal("Unexpected type argument %s", type2name(type)); break; 9607 } 9608 } 9609 9610 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9611 switch(type) { 9612 case T_INT: 9613 evpandd(dst, mask, nds, src, merge, vector_len); break; 9614 case T_LONG: 9615 evpandq(dst, mask, nds, src, merge, vector_len); break; 9616 default: 9617 fatal("Unexpected type argument %s", type2name(type)); break; 9618 } 9619 } 9620 9621 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9622 switch(type) { 9623 case T_INT: 9624 evpandd(dst, mask, nds, src, merge, vector_len); break; 9625 case T_LONG: 9626 evpandq(dst, mask, nds, src, merge, vector_len); break; 9627 default: 9628 fatal("Unexpected type argument %s", type2name(type)); break; 9629 } 9630 } 9631 9632 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) { 9633 switch(masklen) { 9634 case 8: 9635 kortestbl(src1, src2); 9636 break; 9637 case 16: 9638 kortestwl(src1, src2); 9639 break; 9640 case 32: 9641 kortestdl(src1, src2); 9642 break; 9643 case 64: 9644 kortestql(src1, src2); 9645 break; 9646 default: 9647 fatal("Unexpected mask length %d", masklen); 9648 break; 9649 } 9650 } 9651 9652 9653 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) { 9654 switch(masklen) { 9655 case 8: 9656 ktestbl(src1, src2); 9657 break; 9658 case 16: 9659 ktestwl(src1, src2); 9660 break; 9661 case 32: 9662 ktestdl(src1, src2); 9663 break; 9664 case 64: 9665 ktestql(src1, src2); 9666 break; 9667 default: 9668 fatal("Unexpected mask length %d", masklen); 9669 break; 9670 } 9671 } 9672 9673 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 9674 switch(type) { 9675 case T_INT: 9676 evprold(dst, mask, src, shift, merge, vlen_enc); break; 9677 case T_LONG: 9678 evprolq(dst, mask, src, shift, merge, vlen_enc); break; 9679 default: 9680 fatal("Unexpected type argument %s", type2name(type)); break; 9681 break; 9682 } 9683 } 9684 9685 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 9686 switch(type) { 9687 case T_INT: 9688 evprord(dst, mask, src, shift, merge, vlen_enc); break; 9689 case T_LONG: 9690 evprorq(dst, mask, src, shift, merge, vlen_enc); break; 9691 default: 9692 fatal("Unexpected type argument %s", type2name(type)); break; 9693 } 9694 } 9695 9696 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 9697 switch(type) { 9698 case T_INT: 9699 evprolvd(dst, mask, src1, src2, merge, vlen_enc); break; 9700 case T_LONG: 9701 evprolvq(dst, mask, src1, src2, merge, vlen_enc); break; 9702 default: 9703 fatal("Unexpected type argument %s", type2name(type)); break; 9704 } 9705 } 9706 9707 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 9708 switch(type) { 9709 case T_INT: 9710 evprorvd(dst, mask, src1, src2, merge, vlen_enc); break; 9711 case T_LONG: 9712 evprorvq(dst, mask, src1, src2, merge, vlen_enc); break; 9713 default: 9714 fatal("Unexpected type argument %s", type2name(type)); break; 9715 } 9716 } 9717 9718 void MacroAssembler::evpandq(XMMRegister dst, 
XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9719 assert(rscratch != noreg || always_reachable(src), "missing"); 9720 9721 if (reachable(src)) { 9722 evpandq(dst, nds, as_Address(src), vector_len); 9723 } else { 9724 lea(rscratch, src); 9725 evpandq(dst, nds, Address(rscratch, 0), vector_len); 9726 } 9727 } 9728 9729 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 9730 assert(rscratch != noreg || always_reachable(src), "missing"); 9731 9732 if (reachable(src)) { 9733 Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len); 9734 } else { 9735 lea(rscratch, src); 9736 Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 9737 } 9738 } 9739 9740 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9741 assert(rscratch != noreg || always_reachable(src), "missing"); 9742 9743 if (reachable(src)) { 9744 evporq(dst, nds, as_Address(src), vector_len); 9745 } else { 9746 lea(rscratch, src); 9747 evporq(dst, nds, Address(rscratch, 0), vector_len); 9748 } 9749 } 9750 9751 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9752 assert(rscratch != noreg || always_reachable(src), "missing"); 9753 9754 if (reachable(src)) { 9755 vpshufb(dst, nds, as_Address(src), vector_len); 9756 } else { 9757 lea(rscratch, src); 9758 vpshufb(dst, nds, Address(rscratch, 0), vector_len); 9759 } 9760 } 9761 9762 void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9763 assert(rscratch != noreg || always_reachable(src), "missing"); 9764 9765 if (reachable(src)) { 9766 Assembler::vpor(dst, nds, as_Address(src), vector_len); 9767 } else { 9768 lea(rscratch, src); 9769 Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len); 9770 } 9771 } 9772 9773 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) { 9774 assert(rscratch != noreg || always_reachable(src3), "missing"); 9775 9776 if (reachable(src3)) { 9777 vpternlogq(dst, imm8, src2, as_Address(src3), vector_len); 9778 } else { 9779 lea(rscratch, src3); 9780 vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len); 9781 } 9782 } 9783 9784 #if COMPILER2_OR_JVMCI 9785 9786 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 9787 Register length, Register temp, int vec_enc) { 9788 // Computing mask for predicated vector store. 9789 movptr(temp, -1); 9790 bzhiq(temp, temp, length); 9791 kmov(mask, temp); 9792 evmovdqu(bt, mask, dst, xmm, true, vec_enc); 9793 } 9794 9795 // Set memory operation for length "less than" 64 bytes. 
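// A rough C-like sketch of the decomposition below (illustrative only, not
// emitted code; 'shift' converts the 32-byte quantum to an element count):
//
//   if (!use64byteVector) {
//     fill32(dst, disp);                     // unconditional 32-byte store
//     length -= 32 >> shift;                 // elements already written
//     fill32_masked(dst, disp + 32, length); // predicated store for the tail
//   } else {
//     fill_masked(dst, disp, length);        // one 64-byte predicated store
//   }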
9796 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp, 9797 XMMRegister xmm, KRegister mask, Register length, 9798 Register temp, bool use64byteVector) { 9799 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9800 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 9801 if (!use64byteVector) { 9802 fill32(dst, disp, xmm); 9803 subptr(length, 32 >> shift); 9804 fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp); 9805 } else { 9806 assert(MaxVectorSize == 64, "vector length != 64"); 9807 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit); 9808 } 9809 } 9810 9811 9812 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp, 9813 XMMRegister xmm, KRegister mask, Register length, 9814 Register temp) { 9815 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9816 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 9817 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit); 9818 } 9819 9820 9821 void MacroAssembler::fill32(Address dst, XMMRegister xmm) { 9822 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9823 vmovdqu(dst, xmm); 9824 } 9825 9826 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) { 9827 fill32(Address(dst, disp), xmm); 9828 } 9829 9830 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) { 9831 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9832 if (!use64byteVector) { 9833 fill32(dst, xmm); 9834 fill32(dst.plus_disp(32), xmm); 9835 } else { 9836 evmovdquq(dst, xmm, Assembler::AVX_512bit); 9837 } 9838 } 9839 9840 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) { 9841 fill64(Address(dst, disp), xmm, use64byteVector); 9842 } 9843 9844 #ifdef _LP64 9845 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value, 9846 Register count, Register rtmp, XMMRegister xtmp) { 9847 Label L_exit; 9848 Label L_fill_start; 9849 Label L_fill_64_bytes; 9850 Label L_fill_96_bytes; 9851 Label L_fill_128_bytes; 9852 Label L_fill_128_bytes_loop; 9853 Label L_fill_128_loop_header; 9854 Label L_fill_128_bytes_loop_header; 9855 Label L_fill_128_bytes_loop_pre_header; 9856 Label L_fill_zmm_sequence; 9857 9858 int shift = -1; 9859 int avx3threshold = VM_Version::avx3_threshold(); 9860 switch(type) { 9861 case T_BYTE: shift = 0; 9862 break; 9863 case T_SHORT: shift = 1; 9864 break; 9865 case T_INT: shift = 2; 9866 break; 9867 /* Uncomment when LONG fill stubs are supported. 
9868 case T_LONG: shift = 3; 9869 break; 9870 */ 9871 default: 9872 fatal("Unhandled type: %s\n", type2name(type)); 9873 } 9874 9875 if ((avx3threshold != 0) || (MaxVectorSize == 32)) { 9876 9877 if (MaxVectorSize == 64) { 9878 cmpq(count, avx3threshold >> shift); 9879 jcc(Assembler::greater, L_fill_zmm_sequence); 9880 } 9881 9882 evpbroadcast(type, xtmp, value, Assembler::AVX_256bit); 9883 9884 bind(L_fill_start); 9885 9886 cmpq(count, 32 >> shift); 9887 jccb(Assembler::greater, L_fill_64_bytes); 9888 fill32_masked(shift, to, 0, xtmp, k2, count, rtmp); 9889 jmp(L_exit); 9890 9891 bind(L_fill_64_bytes); 9892 cmpq(count, 64 >> shift); 9893 jccb(Assembler::greater, L_fill_96_bytes); 9894 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp); 9895 jmp(L_exit); 9896 9897 bind(L_fill_96_bytes); 9898 cmpq(count, 96 >> shift); 9899 jccb(Assembler::greater, L_fill_128_bytes); 9900 fill64(to, 0, xtmp); 9901 subq(count, 64 >> shift); 9902 fill32_masked(shift, to, 64, xtmp, k2, count, rtmp); 9903 jmp(L_exit); 9904 9905 bind(L_fill_128_bytes); 9906 cmpq(count, 128 >> shift); 9907 jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header); 9908 fill64(to, 0, xtmp); 9909 fill32(to, 64, xtmp); 9910 subq(count, 96 >> shift); 9911 fill32_masked(shift, to, 96, xtmp, k2, count, rtmp); 9912 jmp(L_exit); 9913 9914 bind(L_fill_128_bytes_loop_pre_header); 9915 { 9916 mov(rtmp, to); 9917 andq(rtmp, 31); 9918 jccb(Assembler::zero, L_fill_128_bytes_loop_header); 9919 negq(rtmp); 9920 addq(rtmp, 32); 9921 mov64(r8, -1L); 9922 bzhiq(r8, r8, rtmp); 9923 kmovql(k2, r8); 9924 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit); 9925 addq(to, rtmp); 9926 shrq(rtmp, shift); 9927 subq(count, rtmp); 9928 } 9929 9930 cmpq(count, 128 >> shift); 9931 jcc(Assembler::less, L_fill_start); 9932 9933 bind(L_fill_128_bytes_loop_header); 9934 subq(count, 128 >> shift); 9935 9936 align32(); 9937 bind(L_fill_128_bytes_loop); 9938 fill64(to, 0, xtmp); 9939 fill64(to, 64, xtmp); 9940 addq(to, 128); 9941 subq(count, 128 >> shift); 9942 jccb(Assembler::greaterEqual, L_fill_128_bytes_loop); 9943 9944 addq(count, 128 >> shift); 9945 jcc(Assembler::zero, L_exit); 9946 jmp(L_fill_start); 9947 } 9948 9949 if (MaxVectorSize == 64) { 9950 // Sequence using 64 byte ZMM register. 
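// As in the 32-byte sequence above, but at twice the granularity: counts up
// to 64/128/192 bytes are handled with zero, one or two full 64-byte stores
// plus a single masked tail store, while larger counts first peel to 64-byte
// alignment and then loop on 192 bytes per iteration.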
9951 Label L_fill_128_bytes_zmm; 9952 Label L_fill_192_bytes_zmm; 9953 Label L_fill_192_bytes_loop_zmm; 9954 Label L_fill_192_bytes_loop_header_zmm; 9955 Label L_fill_192_bytes_loop_pre_header_zmm; 9956 Label L_fill_start_zmm_sequence; 9957 9958 bind(L_fill_zmm_sequence); 9959 evpbroadcast(type, xtmp, value, Assembler::AVX_512bit); 9960 9961 bind(L_fill_start_zmm_sequence); 9962 cmpq(count, 64 >> shift); 9963 jccb(Assembler::greater, L_fill_128_bytes_zmm); 9964 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true); 9965 jmp(L_exit); 9966 9967 bind(L_fill_128_bytes_zmm); 9968 cmpq(count, 128 >> shift); 9969 jccb(Assembler::greater, L_fill_192_bytes_zmm); 9970 fill64(to, 0, xtmp, true); 9971 subq(count, 64 >> shift); 9972 fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true); 9973 jmp(L_exit); 9974 9975 bind(L_fill_192_bytes_zmm); 9976 cmpq(count, 192 >> shift); 9977 jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm); 9978 fill64(to, 0, xtmp, true); 9979 fill64(to, 64, xtmp, true); 9980 subq(count, 128 >> shift); 9981 fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true); 9982 jmp(L_exit); 9983 9984 bind(L_fill_192_bytes_loop_pre_header_zmm); 9985 { 9986 movq(rtmp, to); 9987 andq(rtmp, 63); 9988 jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm); 9989 negq(rtmp); 9990 addq(rtmp, 64); 9991 mov64(r8, -1L); 9992 bzhiq(r8, r8, rtmp); 9993 kmovql(k2, r8); 9994 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit); 9995 addq(to, rtmp); 9996 shrq(rtmp, shift); 9997 subq(count, rtmp); 9998 } 9999 10000 cmpq(count, 192 >> shift); 10001 jcc(Assembler::less, L_fill_start_zmm_sequence); 10002 10003 bind(L_fill_192_bytes_loop_header_zmm); 10004 subq(count, 192 >> shift); 10005 10006 align32(); 10007 bind(L_fill_192_bytes_loop_zmm); 10008 fill64(to, 0, xtmp, true); 10009 fill64(to, 64, xtmp, true); 10010 fill64(to, 128, xtmp, true); 10011 addq(to, 192); 10012 subq(count, 192 >> shift); 10013 jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm); 10014 10015 addq(count, 192 >> shift); 10016 jcc(Assembler::zero, L_exit); 10017 jmp(L_fill_start_zmm_sequence); 10018 } 10019 bind(L_exit); 10020 } 10021 #endif 10022 #endif //COMPILER2_OR_JVMCI 10023 10024 10025 #ifdef _LP64 10026 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) { 10027 Label done; 10028 cvttss2sil(dst, src); 10029 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 10030 cmpl(dst, 0x80000000); // float_sign_flip 10031 jccb(Assembler::notEqual, done); 10032 subptr(rsp, 8); 10033 movflt(Address(rsp, 0), src); 10034 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup()))); 10035 pop(dst); 10036 bind(done); 10037 } 10038 10039 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) { 10040 Label done; 10041 cvttsd2sil(dst, src); 10042 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 10043 cmpl(dst, 0x80000000); // float_sign_flip 10044 jccb(Assembler::notEqual, done); 10045 subptr(rsp, 8); 10046 movdbl(Address(rsp, 0), src); 10047 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup()))); 10048 pop(dst); 10049 bind(done); 10050 } 10051 10052 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) { 10053 Label done; 10054 cvttss2siq(dst, src); 10055 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); 10056 jccb(Assembler::notEqual, done); 10057 subptr(rsp, 8); 10058 movflt(Address(rsp, 0), src); 10059 
call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup()))); 10060 pop(dst); 10061 bind(done); 10062 } 10063 10064 void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) { 10065 // Following code is line by line assembly translation rounding algorithm. 10066 // Please refer to java.lang.Math.round(float) algorithm for details. 10067 const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000; 10068 const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24; 10069 const int32_t FloatConsts_EXP_BIAS = 127; 10070 const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF; 10071 const int32_t MINUS_32 = 0xFFFFFFE0; 10072 Label L_special_case, L_block1, L_exit; 10073 movl(rtmp, FloatConsts_EXP_BIT_MASK); 10074 movdl(dst, src); 10075 andl(dst, rtmp); 10076 sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1); 10077 movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS); 10078 subl(rtmp, dst); 10079 movl(rcx, rtmp); 10080 movl(dst, MINUS_32); 10081 testl(rtmp, dst); 10082 jccb(Assembler::notEqual, L_special_case); 10083 movdl(dst, src); 10084 andl(dst, FloatConsts_SIGNIF_BIT_MASK); 10085 orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1); 10086 movdl(rtmp, src); 10087 testl(rtmp, rtmp); 10088 jccb(Assembler::greaterEqual, L_block1); 10089 negl(dst); 10090 bind(L_block1); 10091 sarl(dst); 10092 addl(dst, 0x1); 10093 sarl(dst, 0x1); 10094 jmp(L_exit); 10095 bind(L_special_case); 10096 convert_f2i(dst, src); 10097 bind(L_exit); 10098 } 10099 10100 void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) { 10101 // Following code is line by line assembly translation rounding algorithm. 10102 // Please refer to java.lang.Math.round(double) algorithm for details. 10103 const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L; 10104 const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53; 10105 const int64_t DoubleConsts_EXP_BIAS = 1023; 10106 const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL; 10107 const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L; 10108 Label L_special_case, L_block1, L_exit; 10109 mov64(rtmp, DoubleConsts_EXP_BIT_MASK); 10110 movq(dst, src); 10111 andq(dst, rtmp); 10112 sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1); 10113 mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS); 10114 subq(rtmp, dst); 10115 movq(rcx, rtmp); 10116 mov64(dst, MINUS_64); 10117 testq(rtmp, dst); 10118 jccb(Assembler::notEqual, L_special_case); 10119 movq(dst, src); 10120 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK); 10121 andq(dst, rtmp); 10122 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1); 10123 orq(dst, rtmp); 10124 movq(rtmp, src); 10125 testq(rtmp, rtmp); 10126 jccb(Assembler::greaterEqual, L_block1); 10127 negq(dst); 10128 bind(L_block1); 10129 sarq(dst); 10130 addq(dst, 0x1); 10131 sarq(dst, 0x1); 10132 jmp(L_exit); 10133 bind(L_special_case); 10134 convert_d2l(dst, src); 10135 bind(L_exit); 10136 } 10137 10138 void MacroAssembler::convert_d2l(Register dst, XMMRegister src) { 10139 Label done; 10140 cvttsd2siq(dst, src); 10141 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); 10142 jccb(Assembler::notEqual, done); 10143 subptr(rsp, 8); 10144 movdbl(Address(rsp, 0), src); 10145 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup()))); 10146 pop(dst); 10147 bind(done); 10148 } 10149 10150 void MacroAssembler::cache_wb(Address line) 10151 { 10152 // 64 bit cpus always support clflush 10153 assert(VM_Version::supports_clflush(), "clflush should be available"); 
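// Note: unlike CLFLUSH, CLFLUSHOPT and CLWB are only weakly ordered, so
// callers that need the writeback to be globally visible must pair this
// with cache_wbsync() below, which emits the SFENCE for those variants.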
10154 bool optimized = VM_Version::supports_clflushopt(); 10155 bool no_evict = VM_Version::supports_clwb(); 10156 10157 // prefer clwb (writeback without evict), otherwise 10158 // prefer clflushopt (potentially parallel writeback with evict), 10159 // otherwise fall back on clflush (serial writeback with evict) 10160 10161 if (optimized) { 10162 if (no_evict) { 10163 clwb(line); 10164 } else { 10165 clflushopt(line); 10166 } 10167 } else { 10168 // no need for fence when using CLFLUSH 10169 clflush(line); 10170 } 10171 } 10172 10173 void MacroAssembler::cache_wbsync(bool is_pre) 10174 { 10175 assert(VM_Version::supports_clflush(), "clflush should be available"); 10176 bool optimized = VM_Version::supports_clflushopt(); 10177 bool no_evict = VM_Version::supports_clwb(); 10178 10179 // pick the correct implementation 10180 10181 if (!is_pre && (optimized || no_evict)) { 10182 // need an sfence for post flush when using clflushopt or clwb 10183 // otherwise no need for any synchronization 10184 10185 sfence(); 10186 } 10187 } 10188 10189 #endif // _LP64 10190 10191 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { 10192 switch (cond) { 10193 // Note some conditions are synonyms for others 10194 case Assembler::zero: return Assembler::notZero; 10195 case Assembler::notZero: return Assembler::zero; 10196 case Assembler::less: return Assembler::greaterEqual; 10197 case Assembler::lessEqual: return Assembler::greater; 10198 case Assembler::greater: return Assembler::lessEqual; 10199 case Assembler::greaterEqual: return Assembler::less; 10200 case Assembler::below: return Assembler::aboveEqual; 10201 case Assembler::belowEqual: return Assembler::above; 10202 case Assembler::above: return Assembler::belowEqual; 10203 case Assembler::aboveEqual: return Assembler::below; 10204 case Assembler::overflow: return Assembler::noOverflow; 10205 case Assembler::noOverflow: return Assembler::overflow; 10206 case Assembler::negative: return Assembler::positive; 10207 case Assembler::positive: return Assembler::negative; 10208 case Assembler::parity: return Assembler::noParity; 10209 case Assembler::noParity: return Assembler::parity; 10210 } 10211 ShouldNotReachHere(); return Assembler::overflow; 10212 } 10213 10214 SkipIfEqual::SkipIfEqual( 10215 MacroAssembler* masm, const bool* flag_addr, bool value, Register rscratch) { 10216 _masm = masm; 10217 _masm->cmp8(ExternalAddress((address)flag_addr), value, rscratch); 10218 _masm->jcc(Assembler::equal, _label); 10219 } 10220 10221 SkipIfEqual::~SkipIfEqual() { 10222 _masm->bind(_label); 10223 } 10224 10225 // 32-bit Windows has its own fast-path implementation 10226 // of get_thread 10227 #if !defined(WIN32) || defined(_LP64) 10228 10229 // This is simply a call to Thread::current() 10230 void MacroAssembler::get_thread(Register thread) { 10231 if (thread != rax) { 10232 push(rax); 10233 } 10234 LP64_ONLY(push(rdi);) 10235 LP64_ONLY(push(rsi);) 10236 push(rdx); 10237 push(rcx); 10238 #ifdef _LP64 10239 push(r8); 10240 push(r9); 10241 push(r10); 10242 push(r11); 10243 #endif 10244 10245 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0); 10246 10247 #ifdef _LP64 10248 pop(r11); 10249 pop(r10); 10250 pop(r9); 10251 pop(r8); 10252 #endif 10253 pop(rcx); 10254 pop(rdx); 10255 LP64_ONLY(pop(rsi);) 10256 LP64_ONLY(pop(rdi);) 10257 if (thread != rax) { 10258 mov(thread, rax); 10259 pop(rax); 10260 } 10261 } 10262 10263 10264 #endif // !WIN32 || _LP64 10265 10266 void
void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
  Label L_stack_ok;
  if (bias == 0) {
    testptr(sp, 2 * wordSize - 1);
  } else {
    // lea(tmp, Address(rsp, bias));
    mov(tmp, sp);
    addptr(tmp, bias);
    testptr(tmp, 2 * wordSize - 1);
  }
  jcc(Assembler::equal, L_stack_ok);
  block_comment(msg);
  stop(msg);
  bind(L_stack_ok);
}

// Implements lightweight-locking.
//
// obj: the object to be locked
// reg_rax: rax
// thread: the thread which attempts to lock obj
// tmp: a temporary register
void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
  assert(reg_rax == rax, "");
  assert_different_registers(basic_lock, obj, reg_rax, thread, tmp);

  Label push;
  const Register top = tmp;

  // Preload the markWord. It is important that this is the first
  // instruction emitted as it is part of C1's null check semantics.
  movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (UseObjectMonitorTable) {
    // Clear cache in case fast locking succeeds.
    movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize(BasicLock::object_monitor_cache_offset_in_bytes())), 0);
  }

  // Load top.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));

  // Check if the lock-stack is full.
  cmpl(top, LockStack::end_offset());
  jcc(Assembler::greaterEqual, slow);

  // Check for recursion.
  cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
  jcc(Assembler::equal, push);

  // Check header for monitor (0b10).
  testptr(reg_rax, markWord::monitor_value);
  jcc(Assembler::notZero, slow);

  // Try to lock. Transition lock bits 0b01 => 0b00
  movptr(tmp, reg_rax);
  andptr(tmp, ~(int32_t)markWord::unlocked_value);
  orptr(reg_rax, markWord::unlocked_value);
  lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  jcc(Assembler::notEqual, slow);

  // Restore top, CAS clobbers register.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));

  bind(push);
  // After successful lock, push object on lock-stack.
  movptr(Address(thread, top), obj);
  incrementl(top, oopSize);
  movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
}
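// Illustrative sketch only (kept compiled out, not VM code): the mark-word
// transition the CAS in lightweight_lock() performs, modeled with
// std::atomic. The type and helper name are hypothetical simplifications of
// the VM's markWord; lock bits are 0b01 = unlocked, 0b00 = fast-locked,
// 0b10 = inflated monitor. lightweight_unlock() below attempts the inverse
// transition, 0b00 => 0b01.
#if 0
#include <atomic>
#include <cstdint>

static bool try_fast_lock_sketch(std::atomic<uintptr_t>& mark_word) {
  uintptr_t mark = mark_word.load(std::memory_order_relaxed);
  if (mark & 0b10) {
    return false;                                      // monitor bit set: take the slow path
  }
  uintptr_t expected = mark | 0b01;                    // header must currently read "unlocked"
  uintptr_t desired  = expected & ~(uintptr_t)0b01;    // clear the lock bits => fast-locked
  return mark_word.compare_exchange_strong(expected, desired);  // lock(); cmpxchgptr
}
#endif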
// Implements lightweight-unlocking.
//
// obj: the object to be unlocked
// reg_rax: rax
// thread: the thread
// tmp: a temporary register
void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
  assert(reg_rax == rax, "");
  assert_different_registers(obj, reg_rax, thread, tmp);

  Label unlocked, push_and_slow;
  const Register top = tmp;

  // Check if obj is top of lock-stack.
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
  cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
  jcc(Assembler::notEqual, slow);

  // Pop lock-stack.
  DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
  subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);

  // Check if recursive.
  cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
  jcc(Assembler::equal, unlocked);

  // Not recursive. Check header for monitor (0b10).
  movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
  testptr(reg_rax, markWord::monitor_value);
  jcc(Assembler::notZero, push_and_slow);

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  testptr(reg_rax, markWord::unlocked_value);
  jcc(Assembler::zero, not_unlocked);
  stop("lightweight_unlock already unlocked");
  bind(not_unlocked);
#endif

  // Try to unlock. Transition lock bits 0b00 => 0b01
  movptr(tmp, reg_rax);
  orptr(tmp, markWord::unlocked_value);
  lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  jcc(Assembler::equal, unlocked);

  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
#ifdef ASSERT
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
  movptr(Address(thread, top), obj);
#endif
  addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
  jmp(slow);

  bind(unlocked);
}

#ifdef _LP64
// Saves legacy GPRs state on stack.
void MacroAssembler::save_legacy_gprs() {
  subq(rsp, 16 * wordSize);
  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // Slot 11 * wordSize is deliberately left unused; it corresponds to rsp.
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}

// Restores legacy GPRs state from stack.
void MacroAssembler::restore_legacy_gprs() {
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9,  Address(rsp, 6 * wordSize));
  movq(r8,  Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));
  addq(rsp, 16 * wordSize);
}

void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) {
  if (VM_Version::supports_apx_f()) {
    esetzucc(comparison, dst);
  } else {
    setb(comparison, dst);
    movzbl(dst, dst);
  }
}
#endif
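// Illustrative sketch only (kept compiled out, not VM code): both branches of
// setcc() above materialize a condition as a zero-extended 0/1 value in a
// full register. APX esetzucc does this in one instruction; the legacy path
// needs setb to write the low byte plus movzbl to zero-extend it. The helper
// name is hypothetical.
#if 0
static int setcc_semantics_sketch(bool condition) {
  return condition ? 1 : 0;   // what setb+movzbl (or esetzucc) leave in dst
}
#endif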