/*
 * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

const int MXCSR_MASK  = 0xFFC0;  // Mask out any pending exceptions
const int FPU_CNTRL_WRD_MASK = 0xFFFF;

// -------------------------------------------------------------------------------------------------------------------------
// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif //PRODUCT

  void inc_copy_counter_np(BasicType t) {
#ifndef PRODUCT
    switch (t) {
    case T_BYTE:   inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);  return;
    case T_SHORT:  inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); return;
    case T_INT:    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);   return;
    case T_LONG:   inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);  return;
    case T_OBJECT: inc_counter_np(SharedRuntime::_oop_array_copy_ctr);    return;
    default:       ShouldNotReachHere();
    }
#endif //PRODUCT
  }

  //------------------------------------------------------------------------------------------------------------------------
  // Call stubs are used to call Java from C
  //
  //    [ return_from_Java     ] <--- rsp
  //    [ argument word n      ]
  //      ...
  // -N [ argument word 1      ]
  // -7 [ Possible padding for stack alignment ]
  // -6 [ Possible padding for stack alignment ]
  // -5 [ Possible padding for stack alignment ]
  // -4 [ mxcsr save           ] <--- rsp_after_call
  // -3 [ saved rbx,           ]
  // -2 [ saved rsi            ]
  // -1 [ saved rdi            ]
  //  0 [ saved rbp,           ] <--- rbp,
  //  1 [ return address       ]
  //  2 [ ptr. to call wrapper ]
  //  3 [ result               ]
  //  4 [ result_type          ]
  //  5 [ method               ]
  //  6 [ entry_point          ]
  //  7 [ parameters           ]
  //  8 [ parameter_size       ]
  //  9 [ thread               ]
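  //
  // For reference, the C-side caller views this frame through the CallStub
  // function pointer type (a sketch from memory; the authoritative typedef
  // lives in stubRoutines.hpp):
  //
  //   typedef void (*CallStub)(address   link,            // call wrapper
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,
  //                            intptr_t* parameters,
  //                            int       size_of_parameters,
  //                            TRAPS);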

  address generate_call_stub(address& return_address) {
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // stub code parameters / addresses
    assert(frame::entry_frame_call_wrapper_offset == 2, "adjust this code");
    bool  sse_save = false;
    const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_catch_exception()!
    const int     locals_count_in_bytes  (4*wordSize);
    const Address mxcsr_save    (rbp, -4 * wordSize);
    const Address saved_rbx     (rbp, -3 * wordSize);
    const Address saved_rsi     (rbp, -2 * wordSize);
    const Address saved_rdi     (rbp, -1 * wordSize);
    const Address result        (rbp,  3 * wordSize);
    const Address result_type   (rbp,  4 * wordSize);
    const Address method        (rbp,  5 * wordSize);
    const Address entry_point   (rbp,  6 * wordSize);
    const Address parameters    (rbp,  7 * wordSize);
    const Address parameter_size(rbp,  8 * wordSize);
    const Address thread        (rbp,  9 * wordSize); // same as in generate_catch_exception()!
    sse_save = UseSSE > 0;

    // stub code
    __ enter();
    __ movptr(rcx, parameter_size);                   // parameter counter
    __ shlptr(rcx, Interpreter::logStackElementSize); // convert parameter count to bytes
    __ addptr(rcx, locals_count_in_bytes);            // reserve space for register saves
    __ subptr(rsp, rcx);
    __ andptr(rsp, -(StackAlignmentInBytes));         // Align stack

    // save rdi, rsi, & rbx, according to C calling conventions
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ movptr(saved_rbx, rbx);

    // save and initialize %mxcsr
    if (sse_save) {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }

    // make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));

#ifdef ASSERT
    // make sure we have no pending exceptions
    { Label L;
      __ movptr(rcx, thread);
      __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(rcx, parameter_size);  // parameter counter
    __ testl(rcx, rcx);
    __ jcc(Assembler::zero, parameters_done);

    // parameter passing loop

    Label loop;
    // Copy Java parameters in reverse order (receiver last)
    // Note that the argument order is inverted in the process
    // source is rdx[rcx: N-1..0]
    // dest   is rsp[rbx: 0..N-1]

    __ movptr(rdx, parameters);          // parameter pointer
    __ xorptr(rbx, rbx);

    __ BIND(loop);

    // get parameter
    __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize));
    __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(),
                      Interpreter::expr_offset_in_bytes(0)), rax);          // store parameter
    __ increment(rbx);
    __ decrement(rcx);
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);           // get Method*
    __ movptr(rax, entry_point);      // get entry_point
    __ mov(rsi, rsp);                 // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(rax);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

#ifdef COMPILER2
    {
      Label L_skip;
      if (UseSSE >= 2) {
        __ verify_FPU(0, "call_stub_return");
      } else {
        for (int i = 1; i < 8; i++) {
          __ ffree(i);
        }

        // UseSSE <= 1 so double result should be left on TOS
        __ movl(rsi, result_type);
        __ cmpl(rsi, T_DOUBLE);
        __ jcc(Assembler::equal, L_skip);
        if (UseSSE == 0) {
          // UseSSE == 0 so float result should be left on TOS
          __ cmpl(rsi, T_FLOAT);
          __ jcc(Assembler::equal, L_skip);
        }
        __ ffree(0);
      }
      __ BIND(L_skip);
    }
#endif // COMPILER2

    // store result depending on type
    // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(rdi, result);
    Label is_long, is_float, is_double, exit;
    __ movl(rsi, result_type);
    __ cmpl(rsi, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(rsi, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(rsi, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(rdi, 0), rax);
    __ BIND(exit);

    // check that FPU stack is empty
    __ verify_FPU(0, "generate_call_stub");

    // pop parameters
    __ lea(rsp, rsp_after_call);

    // restore %mxcsr
    if (sse_save) {
      __ ldmxcsr(mxcsr_save);
    }

    // restore rdi, rsi and rbx,
    __ movptr(rbx, saved_rbx);
    __ movptr(rsi, saved_rsi);
    __ movptr(rdi, saved_rdi);
    __ addptr(rsp, 4*wordSize);

    // return
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movl(Address(rdi, 0 * wordSize), rax);
    __ movl(Address(rdi, 1 * wordSize), rdx);
    __ jmp(exit);

    __ BIND(is_float);
    // interpreter uses xmm0 for return values
    if (UseSSE >= 1) {
      __ movflt(Address(rdi, 0), xmm0);
    } else {
      __ fstp_s(Address(rdi, 0));
    }
    __ jmp(exit);

    __ BIND(is_double);
    // interpreter uses xmm0 for return values
    if (UseSSE >= 2) {
      __ movdbl(Address(rdi, 0), xmm0);
    } else {
      __ fstp_d(Address(rdi, 0));
    }
    __ jmp(exit);

    return start;
  }


  //------------------------------------------------------------------------------------------------------------------------
  // Return point for a Java call if there's an exception thrown in Java code.
  // The exception is caught and transformed into a pending exception stored in
  // JavaThread that can be tested from within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case of an exception
  //       crossing an activation frame boundary, that is not the case if the callee
  //       is compiled code => need to setup the rsp.
  //
  // rax,: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_call_stub()!
    const Address thread        (rbp,  9 * wordSize); // same as in generate_call_stub()!
    address start = __ pc();

    // get thread directly
    __ movptr(rcx, thread);
#ifdef ASSERT
    // verify that threads correspond
    { Label L;
      __ get_thread(rbx);
      __ cmpptr(rbx, rcx);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L);
    }
#endif
    // set pending exception
    __ verify_oop(rax);
    __ movptr(Address(rcx, Thread::pending_exception_offset()), rax);
    __ lea(Address(rcx, Thread::exception_file_offset()),
           ExternalAddress((address)__FILE__));
    __ movl(Address(rcx, Thread::exception_line_offset()), __LINE__);
    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }


  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for runtime calls returning with a pending exception.
  // The pending exception check happened in the runtime or native call stub.
  // The pending exception in Thread is converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();
    const Register thread = rcx;

    // other registers used in this stub
    const Register exception_oop = rax;
    const Register handler_addr  = rbx;
    const Register exception_pc  = rdx;

    // Upon entry, the sp points to the return address returning into Java
    // (interpreted or compiled) code; i.e., the return address becomes the
    // throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack but
    // the exception handler will reset the stack pointer -> ignore them.
    // A potential result in registers can be ignored as well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    { Label L;
      __ get_thread(thread);
      __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx,
    __ get_thread(thread);
    __ movptr(exception_pc, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
    __ mov(handler_addr, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ get_thread(thread);
    __ pop(exception_pc);
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    { Label L;
      __ testptr(exception_oop, exception_oop);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // Verify that there is really a valid exception in RAX.
    __ verify_oop(exception_oop);

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ jmp(handler_addr);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.
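  //
  // Note: MXCSR_MASK (0xFFC0) discards the six sticky exception-flag bits
  // (MXCSR bits 0-5), so the comparison below looks only at the control
  // bits: exception masks, rounding mode, and FTZ/DAZ.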

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls && UseSSE > 0) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);      // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code.");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }


  //---------------------------------------------------------------------------
  // Support for void verify_fpu_cntrl_wrd()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // FP control word to our expected state.

  address generate_verify_fpu_cntrl_wrd() {
    StubCodeMark mark(this, "StubRoutines", "verify_spcw");
    address start = __ pc();

    const Address fpu_cntrl_wrd_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      __ push(rax);
      __ subptr(rsp, wordSize);      // allocate a temp location
      __ fnstcw(fpu_cntrl_wrd_save);
      __ movl(rax, fpu_cntrl_wrd_save);
      __ andl(rax, FPU_CNTRL_WRD_MASK);
      ExternalAddress fpu_std(StubRoutines::x86::addr_fpu_cntrl_wrd_std());
      __ cmp32(rax, fpu_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("Floating point control word changed by native JNI code.");

      __ fldcw(fpu_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  //---------------------------------------------------------------------------
  // Wrapper for slow-case handling of double-to-integer conversion
  // d2i or f2i fast case failed either because it is NaN or because
  // of under/overflow.
  // Input:  FPU TOS: floating-point value
  // Output: rax, (rdx): integer (long) result

  address generate_d2i_wrapper(BasicType t, address fcn) {
    StubCodeMark mark(this, "StubRoutines", "d2i_wrapper");
    address start = __ pc();

    // Capture info about frame layout
    enum layout { FPUState_off         = 0,
                  rbp_off              = FPUStateSizeInWords,
                  rdi_off,
                  rsi_off,
                  rcx_off,
                  rbx_off,
                  saved_argument_off,
                  saved_argument_off2, // 2nd half of double
                  framesize
    };

    assert(FPUStateSizeInWords == 27, "update stack layout");

    // Save outgoing argument to stack across push_FPU_state()
    __ subptr(rsp, wordSize * 2);
    __ fstp_d(Address(rsp, 0));

    // Save CPU & FPU state
    __ push(rbx);
    __ push(rcx);
    __ push(rsi);
    __ push(rdi);
    __ push(rbp);
    __ push_FPU_state();

    // push_FPU_state() resets the FP top of stack
    // Load original double into FP top of stack
    __ fld_d(Address(rsp, saved_argument_off * wordSize));
    // Store double into stack as outgoing argument
    __ subptr(rsp, wordSize*2);
    __ fst_d(Address(rsp, 0));

    // Prepare FPU for doing math in C-land
    __ empty_FPU_stack();
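
    // What the C slow path computes, in rough Java-semantics terms (a sketch;
    // 'fcn' is SharedRuntime::d2i or SharedRuntime::d2l passed in by the caller):
    //   if (x != x)         return 0;          // NaN converts to 0
    //   if (x >= max_value) return max_value;  // saturate on overflow
    //   if (x <= min_value) return min_value;
    //   return (integral cast) x;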
    // Call the C code to massage the double.  Result in EAX
    if (t == T_INT)
      { BLOCK_COMMENT("SharedRuntime::d2i"); }
    else if (t == T_LONG)
      { BLOCK_COMMENT("SharedRuntime::d2l"); }
    __ call_VM_leaf( fcn, 2 );

    // Restore CPU & FPU state
    __ pop_FPU_state();
    __ pop(rbp);
    __ pop(rdi);
    __ pop(rsi);
    __ pop(rcx);
    __ pop(rbx);
    __ addptr(rsp, wordSize * 2);

    __ ret(0);

    return start;
  }
  //---------------------------------------------------------------------------------------------------

  address generate_vector_mask(const char *stub_name, int32_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    for (int i = 0; i < 16; i++) {
      __ emit_data(mask, relocInfo::none, 0);
    }

    return start;
  }

  address generate_popcount_avx_lut(const char *stub_name) {
    __ align64();
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();
    __ emit_data(0x02010100, relocInfo::none, 0);
    __ emit_data(0x03020201, relocInfo::none, 0);
    __ emit_data(0x03020201, relocInfo::none, 0);
    __ emit_data(0x04030302, relocInfo::none, 0);
    __ emit_data(0x02010100, relocInfo::none, 0);
    __ emit_data(0x03020201, relocInfo::none, 0);
    __ emit_data(0x03020201, relocInfo::none, 0);
    __ emit_data(0x04030302, relocInfo::none, 0);
    __ emit_data(0x02010100, relocInfo::none, 0);
    __ emit_data(0x03020201, relocInfo::none, 0);
    __ emit_data(0x03020201, relocInfo::none, 0);
    __ emit_data(0x04030302, relocInfo::none, 0);
    __ emit_data(0x02010100, relocInfo::none, 0);
    __ emit_data(0x03020201, relocInfo::none, 0);
    __ emit_data(0x03020201, relocInfo::none, 0);
    __ emit_data(0x04030302, relocInfo::none, 0);
    return start;
  }
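
  // The table above is the standard nibble-popcount lookup, replicated to fill
  // a 64-byte (AVX-512) constant: byte i of each 16-byte lane holds
  // popcount(i) for i in [0, 15], i.e. 0,1,1,2, 1,2,2,3, 1,2,2,3, 2,3,3,4.
  // A vector shuffle indexed by the low and high nibbles of each input byte,
  // followed by a vertical add, yields per-byte population counts.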

  address generate_iota_indices(const char *stub_name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();
    __ emit_data(0x03020100, relocInfo::none, 0);
    __ emit_data(0x07060504, relocInfo::none, 0);
    __ emit_data(0x0B0A0908, relocInfo::none, 0);
    __ emit_data(0x0F0E0D0C, relocInfo::none, 0);
    __ emit_data(0x13121110, relocInfo::none, 0);
    __ emit_data(0x17161514, relocInfo::none, 0);
    __ emit_data(0x1B1A1918, relocInfo::none, 0);
    __ emit_data(0x1F1E1D1C, relocInfo::none, 0);
    __ emit_data(0x23222120, relocInfo::none, 0);
    __ emit_data(0x27262524, relocInfo::none, 0);
    __ emit_data(0x2B2A2928, relocInfo::none, 0);
    __ emit_data(0x2F2E2D2C, relocInfo::none, 0);
    __ emit_data(0x33323130, relocInfo::none, 0);
    __ emit_data(0x37363534, relocInfo::none, 0);
    __ emit_data(0x3B3A3938, relocInfo::none, 0);
    __ emit_data(0x3F3E3D3C, relocInfo::none, 0);
    return start;
  }

  address generate_vector_byte_shuffle_mask(const char *stub_name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();
    __ emit_data(0x70707070, relocInfo::none, 0);
    __ emit_data(0x70707070, relocInfo::none, 0);
    __ emit_data(0x70707070, relocInfo::none, 0);
    __ emit_data(0x70707070, relocInfo::none, 0);
    __ emit_data(0xF0F0F0F0, relocInfo::none, 0);
    __ emit_data(0xF0F0F0F0, relocInfo::none, 0);
    __ emit_data(0xF0F0F0F0, relocInfo::none, 0);
    __ emit_data(0xF0F0F0F0, relocInfo::none, 0);
    return start;
  }

  address generate_vector_mask_long_double(const char *stub_name, int32_t maskhi, int32_t masklo) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    for (int i = 0; i < 8; i++) {
      __ emit_data(masklo, relocInfo::none, 0);
      __ emit_data(maskhi, relocInfo::none, 0);
    }

    return start;
  }

  //----------------------------------------------------------------------------------------------------

  address generate_vector_byte_perm_mask(const char *stub_name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data(0x00000001, relocInfo::none, 0);
    __ emit_data(0x00000000, relocInfo::none, 0);
    __ emit_data(0x00000003, relocInfo::none, 0);
    __ emit_data(0x00000000, relocInfo::none, 0);
    __ emit_data(0x00000005, relocInfo::none, 0);
    __ emit_data(0x00000000, relocInfo::none, 0);
    __ emit_data(0x00000007, relocInfo::none, 0);
    __ emit_data(0x00000000, relocInfo::none, 0);
    __ emit_data(0x00000000, relocInfo::none, 0);
    __ emit_data(0x00000000, relocInfo::none, 0);
    __ emit_data(0x00000002, relocInfo::none, 0);
    __ emit_data(0x00000000, relocInfo::none, 0);
    __ emit_data(0x00000004, relocInfo::none, 0);
    __ emit_data(0x00000000, relocInfo::none, 0);
    __ emit_data(0x00000006, relocInfo::none, 0);
    __ emit_data(0x00000000, relocInfo::none, 0);

    return start;
  }

  address generate_vector_custom_i32(const char *stub_name, Assembler::AvxVectorLen len,
                                     int32_t val0, int32_t val1, int32_t val2, int32_t val3,
                                     int32_t val4 = 0, int32_t val5 = 0, int32_t val6 = 0, int32_t val7 = 0,
                                     int32_t val8 = 0, int32_t val9 = 0, int32_t val10 = 0, int32_t val11 = 0,
                                     int32_t val12 = 0, int32_t val13 = 0, int32_t val14 = 0, int32_t val15 = 0) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    assert(len != Assembler::AVX_NoVec, "vector len must be specified");
    __ emit_data(val0, relocInfo::none, 0);
    __ emit_data(val1, relocInfo::none, 0);
    __ emit_data(val2, relocInfo::none, 0);
    __ emit_data(val3, relocInfo::none, 0);
    if (len >= Assembler::AVX_256bit) {
      __ emit_data(val4, relocInfo::none, 0);
      __ emit_data(val5, relocInfo::none, 0);
      __ emit_data(val6, relocInfo::none, 0);
      __ emit_data(val7, relocInfo::none, 0);
      if (len >= Assembler::AVX_512bit) {
        __ emit_data(val8, relocInfo::none, 0);
        __ emit_data(val9, relocInfo::none, 0);
        __ emit_data(val10, relocInfo::none, 0);
        __ emit_data(val11, relocInfo::none, 0);
        __ emit_data(val12, relocInfo::none, 0);
        __ emit_data(val13, relocInfo::none, 0);
        __ emit_data(val14, relocInfo::none, 0);
        __ emit_data(val15, relocInfo::none, 0);
      }
    }

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Non-destructive plausibility checks for oops

  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    // Incoming arguments on stack after saving rax,:
    //
    // [tos    ]: saved rdx
    // [tos + 1]: saved EFLAGS
    // [tos + 2]: return address
    // [tos + 3]: char* error message
    // [tos + 4]: oop object to verify
    // [tos + 5]: saved rax, - saved by caller and bashed

    Label exit, error;
    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
    __ push(rdx);                                // save rdx
    // make sure object is 'reasonable'
    __ movptr(rax, Address(rsp, 4 * wordSize));  // get object
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit);               // if obj is NULL it is ok

    // Check if the oop is in the right area of memory
    const int oop_mask = Universe::verify_oop_mask();
    const int oop_bits = Universe::verify_oop_bits();
    __ mov(rdx, rax);
    __ andptr(rdx, oop_mask);
    __ cmpptr(rdx, oop_bits);
    __ jcc(Assembler::notZero, error);

    // make sure klass is 'reasonable', which is not zero.
    __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error);              // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, 5 * wordSize));  // get saved rax, back
    __ pop(rdx);                                 // restore rdx
    __ popf();                                   // restore EFLAGS
    __ ret(3 * wordSize);                        // pop arguments

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, 5 * wordSize));  // get saved rax, back
    __ pop(rdx);                                 // get saved rdx back
    __ popf();                                   // get saved EFLAGS off stack -- will be ignored
    __ pusha();                                  // push registers (eip = return address & msg are already pushed)
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
    __ hlt();
    return start;
  }
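
  // Note on addressing in the copy stubs below: rather than maintaining two
  // moving pointers, the stubs keep 'from' plus the constant difference
  // to_from = to - from.  Every store then uses
  // Address(from, to_from, Address::times_1, disp), i.e.
  // dest = from + (to - from) + disp, so only 'from' needs to be advanced.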

  // Copy 64-byte chunks
  //
  // Inputs:
  //   from        - source array address
  //   to_from     - destination array address - from
  //   qword_count - 8-byte element count, negative
  //
  void xmm_copy_forward(Register from, Register to_from, Register qword_count) {
    assert( UseSSE >= 2, "supported cpu only" );
    Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;

    // Copy 64-byte chunks
    __ jmpb(L_copy_64_bytes);
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_64_bytes_loop);

    if (UseUnalignedLoadStores) {
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(from, to_from, Address::times_1, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from,  0));
        __ vmovdqu(Address(from, to_from, Address::times_1,  0), xmm0);
        __ vmovdqu(xmm1, Address(from, 32));
        __ vmovdqu(Address(from, to_from, Address::times_1, 32), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, 0));
        __ movdqu(Address(from, to_from, Address::times_1, 0), xmm0);
        __ movdqu(xmm1, Address(from, 16));
        __ movdqu(Address(from, to_from, Address::times_1, 16), xmm1);
        __ movdqu(xmm2, Address(from, 32));
        __ movdqu(Address(from, to_from, Address::times_1, 32), xmm2);
        __ movdqu(xmm3, Address(from, 48));
        __ movdqu(Address(from, to_from, Address::times_1, 48), xmm3);
      }
    } else {
      __ movq(xmm0, Address(from, 0));
      __ movq(Address(from, to_from, Address::times_1, 0), xmm0);
      __ movq(xmm1, Address(from, 8));
      __ movq(Address(from, to_from, Address::times_1, 8), xmm1);
      __ movq(xmm2, Address(from, 16));
      __ movq(Address(from, to_from, Address::times_1, 16), xmm2);
      __ movq(xmm3, Address(from, 24));
      __ movq(Address(from, to_from, Address::times_1, 24), xmm3);
      __ movq(xmm4, Address(from, 32));
      __ movq(Address(from, to_from, Address::times_1, 32), xmm4);
      __ movq(xmm5, Address(from, 40));
      __ movq(Address(from, to_from, Address::times_1, 40), xmm5);
      __ movq(xmm6, Address(from, 48));
      __ movq(Address(from, to_from, Address::times_1, 48), xmm6);
      __ movq(xmm7, Address(from, 56));
      __ movq(Address(from, to_from, Address::times_1, 56), xmm7);
    }

    __ addl(from, 64);
    __ BIND(L_copy_64_bytes);
    __ subl(qword_count, 8);
    __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);

    if (UseUnalignedLoadStores && (UseAVX == 2)) {
      // clean upper bits of YMM registers
      __ vpxor(xmm0, xmm0);
      __ vpxor(xmm1, xmm1);
    }
    __ addl(qword_count, 8);
    __ jccb(Assembler::zero, L_exit);
    //
    // length is too short, just copy qwords
    //
    __ BIND(L_copy_8_bytes);
    __ movq(xmm0, Address(from, 0));
    __ movq(Address(from, to_from, Address::times_1), xmm0);
    __ addl(from, 8);
    __ decrement(qword_count);
    __ jcc(Assembler::greater, L_copy_8_bytes);
    __ BIND(L_exit);
  }
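
  // Stack-argument offsets below follow the pattern Address(rsp, 12 + n):
  // after enter() (which pushes rbp) and the explicit push(rsi)/push(rdi),
  // 12 bytes of saved registers sit above rsp, the return address is at
  // rsp + 12, so the first incoming argument lives at rsp + 12 + 4.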
891 BLOCK_COMMENT("Entry:"); 892 } 893 894 if (t == T_OBJECT) { 895 __ testl(count, count); 896 __ jcc(Assembler::zero, L_0_count); 897 } 898 899 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 900 if (dest_uninitialized) { 901 decorators |= IS_DEST_UNINITIALIZED; 902 } 903 if (aligned) { 904 decorators |= ARRAYCOPY_ALIGNED; 905 } 906 907 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 908 bs->arraycopy_prologue(_masm, decorators, t, from, to, count); 909 { 910 bool add_entry = (t != T_OBJECT && (!aligned || t == T_INT)); 911 // UnsafeCopyMemory page error: continue after ucm 912 UnsafeCopyMemoryMark ucmm(this, add_entry, true); 913 __ subptr(to, from); // to --> to_from 914 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element 915 __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp 916 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) { 917 // align source address at 4 bytes address boundary 918 if (t == T_BYTE) { 919 // One byte misalignment happens only for byte arrays 920 __ testl(from, 1); 921 __ jccb(Assembler::zero, L_skip_align1); 922 __ movb(rax, Address(from, 0)); 923 __ movb(Address(from, to_from, Address::times_1, 0), rax); 924 __ increment(from); 925 __ decrement(count); 926 __ BIND(L_skip_align1); 927 } 928 // Two bytes misalignment happens only for byte and short (char) arrays 929 __ testl(from, 2); 930 __ jccb(Assembler::zero, L_skip_align2); 931 __ movw(rax, Address(from, 0)); 932 __ movw(Address(from, to_from, Address::times_1, 0), rax); 933 __ addptr(from, 2); 934 __ subl(count, 1<<(shift-1)); 935 __ BIND(L_skip_align2); 936 } 937 if (!UseXMMForArrayCopy) { 938 __ mov(rax, count); // save 'count' 939 __ shrl(count, shift); // bytes count 940 __ addptr(to_from, from);// restore 'to' 941 __ rep_mov(); 942 __ subptr(to_from, from);// restore 'to_from' 943 __ mov(count, rax); // restore 'count' 944 __ jmpb(L_copy_2_bytes); // all dwords were copied 945 } else { 946 if (!UseUnalignedLoadStores) { 947 // align to 8 bytes, we know we are 4 byte aligned to start 948 __ testptr(from, 4); 949 __ jccb(Assembler::zero, L_copy_64_bytes); 950 __ movl(rax, Address(from, 0)); 951 __ movl(Address(from, to_from, Address::times_1, 0), rax); 952 __ addptr(from, 4); 953 __ subl(count, 1<<shift); 954 } 955 __ BIND(L_copy_64_bytes); 956 __ mov(rax, count); 957 __ shrl(rax, shift+1); // 8 bytes chunk count 958 // 959 // Copy 8-byte chunks through XMM registers, 8 per iteration of the loop 960 // 961 xmm_copy_forward(from, to_from, rax); 962 } 963 // copy tailing dword 964 __ BIND(L_copy_4_bytes); 965 __ testl(count, 1<<shift); 966 __ jccb(Assembler::zero, L_copy_2_bytes); 967 __ movl(rax, Address(from, 0)); 968 __ movl(Address(from, to_from, Address::times_1, 0), rax); 969 if (t == T_BYTE || t == T_SHORT) { 970 __ addptr(from, 4); 971 __ BIND(L_copy_2_bytes); 972 // copy tailing word 973 __ testl(count, 1<<(shift-1)); 974 __ jccb(Assembler::zero, L_copy_byte); 975 __ movw(rax, Address(from, 0)); 976 __ movw(Address(from, to_from, Address::times_1, 0), rax); 977 if (t == T_BYTE) { 978 __ addptr(from, 2); 979 __ BIND(L_copy_byte); 980 // copy tailing byte 981 __ testl(count, 1); 982 __ jccb(Assembler::zero, L_exit); 983 __ movb(rax, Address(from, 0)); 984 __ movb(Address(from, to_from, Address::times_1, 0), rax); 985 __ BIND(L_exit); 986 } else { 987 __ BIND(L_copy_byte); 988 } 989 } else { 990 __ BIND(L_copy_2_bytes); 991 } 992 } 993 994 __ movl(count, Address(rsp, 12+12)); // reread 'count' 995 
    bs->arraycopy_epilogue(_masm, decorators, t, from, to, count);

    if (t == T_OBJECT) {
      __ BIND(L_0_count);
    }
    inc_copy_counter_np(t);
    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }


  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = rdi;  // destination array address
    const Register value = rdx;  // value
    const Register count = rsi;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    __ movptr(to   , Address(rsp, 12+ 4));
    __ movl(value, Address(rsp, 12+ 8));
    __ movl(count, Address(rsp, 12+ 12));

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  address generate_conjoint_copy(BasicType t, bool aligned,
                                 Address::ScaleFactor sf,
                                 address nooverlap_target,
                                 address* entry, const char *name,
                                 bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
    Label L_copy_2_bytes, L_copy_4_bytes, L_copy_8_bytes, L_copy_8_bytes_loop;

    int shift = Address::times_ptr - sf;

    const Register src   = rax;  // source array address
    const Register dst   = rdx;  // destination array address
    const Register from  = rsi;  // source array address
    const Register to    = rdi;  // destination array address
    const Register count = rcx;  // elements count
    const Register end   = rax;  // array end address

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    __ movptr(src  , Address(rsp, 12+ 4));   // from
    __ movptr(dst  , Address(rsp, 12+ 8));   // to
    __ movl2ptr(count, Address(rsp, 12+12)); // count

    if (entry != NULL) {
      *entry = __ pc(); // Entry point from generic arraycopy stub.
      BLOCK_COMMENT("Entry:");
    }

    // nooverlap_target expects arguments in rsi and rdi.
    __ mov(from, src);
    __ mov(to  , dst);

    // arrays overlap test: dispatch to disjoint stub if necessary.
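    //
    // In C terms (a sketch): the forward (disjoint-style) copy is safe when
    //   dst <= src || dst >= src + count * elem_size
    // i.e. the destination does not start inside the source region;
    // otherwise this stub copies from high addresses down to low.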
    RuntimeAddress nooverlap(nooverlap_target);
    __ cmpptr(dst, src);
    __ lea(end, Address(src, count, sf, 0)); // src + count * elem_size
    __ jump_cc(Assembler::belowEqual, nooverlap);
    __ cmpptr(dst, end);
    __ jump_cc(Assembler::aboveEqual, nooverlap);

    if (t == T_OBJECT) {
      __ testl(count, count);
      __ jcc(Assembler::zero, L_0_count);
    }

    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
    if (dest_uninitialized) {
      decorators |= IS_DEST_UNINITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, t, from, to, count);

    {
      bool add_entry = (t != T_OBJECT && (!aligned || t == T_INT));
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, add_entry, true);
      // copy from high to low
      __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
      __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
      if (t == T_BYTE || t == T_SHORT) {
        // Align the end of destination array at 4 bytes address boundary
        __ lea(end, Address(dst, count, sf, 0));
        if (t == T_BYTE) {
          // One byte misalignment happens only for byte arrays
          __ testl(end, 1);
          __ jccb(Assembler::zero, L_skip_align1);
          __ decrement(count);
          __ movb(rdx, Address(from, count, sf, 0));
          __ movb(Address(to, count, sf, 0), rdx);
          __ BIND(L_skip_align1);
        }
        // Two bytes misalignment happens only for byte and short (char) arrays
        __ testl(end, 2);
        __ jccb(Assembler::zero, L_skip_align2);
        __ subptr(count, 1<<(shift-1));
        __ movw(rdx, Address(from, count, sf, 0));
        __ movw(Address(to, count, sf, 0), rdx);
        __ BIND(L_skip_align2);
        __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
        __ jcc(Assembler::below, L_copy_4_bytes);
      }

      if (!UseXMMForArrayCopy) {
        __ std();
        __ mov(rax, count); // Save 'count'
        __ mov(rdx, to);    // Save 'to'
        __ lea(rsi, Address(from, count, sf, -4));
        __ lea(rdi, Address(to  , count, sf, -4));
        __ shrptr(count, shift); // bytes count
        __ rep_mov();
        __ cld();
        __ mov(count, rax); // restore 'count'
        __ andl(count, (1<<shift)-1);        // mask the number of rest elements
        __ movptr(from, Address(rsp, 12+4)); // reread 'from'
        __ mov(to, rdx);    // restore 'to'
        __ jmpb(L_copy_2_bytes); // all dwords were copied
      } else {
        // Align to 8 bytes the end of array. It is aligned to 4 bytes already.
        __ testptr(end, 4);
        __ jccb(Assembler::zero, L_copy_8_bytes);
        __ subl(count, 1<<shift);
        __ movl(rdx, Address(from, count, sf, 0));
        __ movl(Address(to, count, sf, 0), rdx);
        __ jmpb(L_copy_8_bytes);

        __ align(OptoLoopAlignment);
        // Move 8 bytes
        __ BIND(L_copy_8_bytes_loop);
        __ movq(xmm0, Address(from, count, sf, 0));
        __ movq(Address(to, count, sf, 0), xmm0);
        __ BIND(L_copy_8_bytes);
        __ subl(count, 2<<shift);
        __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
        __ addl(count, 2<<shift);
      }
      __ BIND(L_copy_4_bytes);
      // copy prefix dword
      __ testl(count, 1<<shift);
      __ jccb(Assembler::zero, L_copy_2_bytes);
      __ movl(rdx, Address(from, count, sf, -4));
      __ movl(Address(to, count, sf, -4), rdx);

      if (t == T_BYTE || t == T_SHORT) {
        __ subl(count, (1<<shift));
        __ BIND(L_copy_2_bytes);
        // copy prefix word
        __ testl(count, 1<<(shift-1));
        __ jccb(Assembler::zero, L_copy_byte);
        __ movw(rdx, Address(from, count, sf, -2));
        __ movw(Address(to, count, sf, -2), rdx);
        if (t == T_BYTE) {
          __ subl(count, 1<<(shift-1));
          __ BIND(L_copy_byte);
          // copy prefix byte
          __ testl(count, 1);
          __ jccb(Assembler::zero, L_exit);
          __ movb(rdx, Address(from, 0));
          __ movb(Address(to, 0), rdx);
          __ BIND(L_exit);
        } else {
          __ BIND(L_copy_byte);
        }
      } else {
        __ BIND(L_copy_2_bytes);
      }
    }

    __ movl2ptr(count, Address(rsp, 12+12)); // reread count
    bs->arraycopy_epilogue(_masm, decorators, t, from, to, count);

    if (t == T_OBJECT) {
      __ BIND(L_0_count);
    }
    inc_copy_counter_np(t);
    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }


  address generate_disjoint_long_copy(address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_8_bytes, L_copy_8_bytes_loop;
    const Register from    = rax;  // source array address
    const Register to      = rdx;  // destination array address
    const Register count   = rcx;  // elements count
    const Register to_from = rdx;  // (to - from)

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ movptr(from , Address(rsp, 8+0));   // from
    __ movptr(to   , Address(rsp, 8+4));   // to
    __ movl2ptr(count, Address(rsp, 8+8)); // count

    *entry = __ pc(); // Entry point from conjoint arraycopy stub.
    BLOCK_COMMENT("Entry:");

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, true, true);
      __ subptr(to, from); // to --> to_from
      if (UseXMMForArrayCopy) {
        xmm_copy_forward(from, to_from, count);
      } else {
        __ jmpb(L_copy_8_bytes);
        __ align(OptoLoopAlignment);
        __ BIND(L_copy_8_bytes_loop);
        __ fild_d(Address(from, 0));
        __ fistp_d(Address(from, to_from, Address::times_1));
        __ addptr(from, 8);
        __ BIND(L_copy_8_bytes);
        __ decrement(count);
        __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
      }
    }
    inc_copy_counter_np(T_LONG);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }

  address generate_conjoint_long_copy(address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_8_bytes, L_copy_8_bytes_loop;
    const Register from     = rax;  // source array address
    const Register to       = rdx;  // destination array address
    const Register count    = rcx;  // elements count
    const Register end_from = rax;  // source array end address

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ movptr(from , Address(rsp, 8+0));   // from
    __ movptr(to   , Address(rsp, 8+4));   // to
    __ movl2ptr(count, Address(rsp, 8+8)); // count

    *entry = __ pc(); // Entry point from generic arraycopy stub.
    BLOCK_COMMENT("Entry:");

    // arrays overlap test
    __ cmpptr(to, from);
    RuntimeAddress nooverlap(nooverlap_target);
    __ jump_cc(Assembler::belowEqual, nooverlap);
    __ lea(end_from, Address(from, count, Address::times_8, 0));
    __ cmpptr(to, end_from);
    __ movptr(from, Address(rsp, 8)); // from
    __ jump_cc(Assembler::aboveEqual, nooverlap);

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, true, true);

      __ jmpb(L_copy_8_bytes);

      __ align(OptoLoopAlignment);
      __ BIND(L_copy_8_bytes_loop);
      if (UseXMMForArrayCopy) {
        __ movq(xmm0, Address(from, count, Address::times_8));
        __ movq(Address(to, count, Address::times_8), xmm0);
      } else {
        __ fild_d(Address(from, count, Address::times_8));
        __ fistp_d(Address(to, count, Address::times_8));
      }
      __ BIND(L_copy_8_bytes);
      __ decrement(count);
      __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);

    }
    inc_copy_counter_np(T_LONG);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }


  // Helper for generating a dynamic type check.
  // The sub_klass must be one of {rbx, rdx, rsi}.
  // The temp is killed.
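  //
  // The fast path mirrors Klass::is_subtype_of (a sketch):
  //   if (sub_klass == super_klass)                            return success;
  //   if (*(sub_klass + super->super_check_offset) == super)   return success; // display hit
  //   if (super_check_offset != secondary_super_cache_offset)  return failure; // primary miss
  //   // otherwise fall back to scanning sub_klass->secondary_supers (slow path)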
  void generate_type_check(Register sub_klass,
                           Address& super_check_offset_addr,
                           Address& super_klass_addr,
                           Register temp,
                           Label* L_success, Label* L_failure) {
    BLOCK_COMMENT("type_check:");

    Label L_fallthrough;
#define LOCAL_JCC(assembler_con, label_ptr)                      \
    if (label_ptr != NULL)  __ jcc(assembler_con, *(label_ptr)); \
    else                    __ jcc(assembler_con, L_fallthrough) /*omit semi*/

    // The following is a strange variation of the fast path which requires
    // one less register, because needed values are on the argument stack.
    // __ check_klass_subtype_fast_path(sub_klass, *super_klass*, temp,
    //                                  L_success, L_failure, NULL);
    assert_different_registers(sub_klass, temp);

    int sc_offset = in_bytes(Klass::secondary_super_cache_offset());

    // if the pointers are equal, we are done (e.g., String[] elements)
    __ cmpptr(sub_klass, super_klass_addr);
    LOCAL_JCC(Assembler::equal, L_success);

    // check the supertype display:
    __ movl2ptr(temp, super_check_offset_addr);
    Address super_check_addr(sub_klass, temp, Address::times_1, 0);
    __ movptr(temp, super_check_addr); // load displayed supertype
    __ cmpptr(temp, super_klass_addr); // test the super type
    LOCAL_JCC(Assembler::equal, L_success);

    // if it was a primary super, we can just fail immediately
    __ cmpl(super_check_offset_addr, sc_offset);
    LOCAL_JCC(Assembler::notEqual, L_failure);

    // The repne_scan instruction uses fixed registers, which will get spilled.
    // We happen to know this works best when super_klass is in rax.
    Register super_klass = temp;
    __ movptr(super_klass, super_klass_addr);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg,
                                     L_success, L_failure);

    __ bind(L_fallthrough);

    if (L_success == NULL) { BLOCK_COMMENT("L_success:"); }
    if (L_failure == NULL) { BLOCK_COMMENT("L_failure:"); }

#undef LOCAL_JCC
  }

  //
  // Generate checkcasting array copy stub
  //
  // Input:
  //   4(rsp)   - source array address
  //   8(rsp)   - destination array address
  //   12(rsp)  - element count, can be zero
  //   16(rsp)  - size_t ckoff (super_check_offset)
  //   20(rsp)  - oop ckval (super_klass)
  //
  // Output:
  //   rax, ==  0   - success
  //   rax, == -1^K - failure, where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name, address* entry, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // register use:
    //  rax, rdx, rcx -- loop control (end_from, end_to, count)
    //  rdi, rsi      -- element access (oop, klass)
    //  rbx,          -- temp
    const Register from       = rax;    // source array address
    const Register to         = rdx;    // destination array address
    const Register length     = rcx;    // elements count
    const Register elem       = rdi;    // each oop copied
    const Register elem_klass = rsi;    // each elem._klass (sub_klass)
    const Register temp       = rbx;    // lone remaining temp

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ push(rsi);
    __ push(rdi);
    __ push(rbx);

    Address   from_arg(rsp, 16+ 4);     // from
    Address     to_arg(rsp, 16+ 8);     // to
    Address length_arg(rsp, 16+12);     // elements count
    Address  ckoff_arg(rsp, 16+16);     // super_check_offset
    Address  ckval_arg(rsp, 16+20);     // super_klass

    // Load up:
    __ movptr(from,     from_arg);
    __ movptr(to,         to_arg);
    __ movl2ptr(length, length_arg);

    if (entry != NULL) {
      *entry = __ pc(); // Entry point from generic arraycopy stub.
      BLOCK_COMMENT("Entry:");
    }

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, Address::times_ptr, 0);
    Address   end_to_addr(to,   length, Address::times_ptr, 0);

    Register end_from = from;           // re-use
    Register end_to   = to;             // re-use
    Register count    = length;         // re-use

    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, Address::times_ptr, 0);
    Address   to_element_addr(end_to,   count, Address::times_ptr, 0);
    Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes());

    DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
    if (dest_uninitialized) {
      decorators |= IS_DEST_UNINITIALIZED;
    }

    BasicType type = T_OBJECT;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    // Copy from low to high addresses, indexed from the end of each array.
    __ lea(end_from, end_from_addr);
    __ lea(end_to, end_to_addr);
    assert(length == count, "");        // else fix next line:
    __ negptr(count);                   // negate and test the length
    __ jccb(Assembler::notZero, L_load_element);

    // Empty array:  Nothing to do.
    __ xorptr(rax, rax);                // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*count, to last element.
    __ align(OptoLoopAlignment);

    __ BIND(L_store_element);
    __ movptr(to_element_addr, elem);   // store the oop
    __ increment(count);                // increment the count toward zero
    __ jccb(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
    __ BIND(L_load_element);
    __ movptr(elem, from_element_addr); // load the oop
    __ testptr(elem, elem);
    __ jccb(Assembler::zero, L_store_element);

    // (Could do a trick here: Remember last successful non-null
    // element stored and make a quick oop equality check on it.)

    __ movptr(elem_klass, elem_klass_addr); // query the object klass
    generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp,
                        &L_store_element, NULL);
    // (On fall-through, we have failed the element type check.)
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops.
    // Emit GC store barriers for the oops we have copied (length_arg + count),
    // and report their number to the caller.
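    // The -1^K failure encoding from the contract above is just ~K (bitwise
    // NOT of the number of elements copied before the failing one); the
    // caller recovers K as ~rax.  notptr(rax) below computes that value.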
    assert_different_registers(to, count, rax);
    Label L_post_barrier;
    __ addl(count, length_arg);   // transfers = (length - remaining)
    __ movl2ptr(rax, count);      // save the value
    __ notptr(rax);               // report (-1^K) to caller (does not affect flags)
    __ jccb(Assembler::notZero, L_post_barrier);
    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier

    // Come here on success only.
    __ BIND(L_do_card_marks);
    __ xorptr(rax, rax);          // return 0 on success
    __ movl2ptr(count, length_arg);

    __ BIND(L_post_barrier);
    __ movptr(to, to_arg);        // reload
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, count);

    // Common exit point (success or failure).
    __ BIND(L_done);
    __ pop(rbx);
    __ pop(rdi);
    __ pop(rsi);
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  //
  // Generate 'unsafe' array copy stub
  // Though just as safe as the other stubs, it takes an unscaled
  // size_t argument instead of an element count.
  //
  // Input:
  //   4(rsp)   - source array address
  //   8(rsp)   - destination array address
  //   12(rsp)  - byte count, can be zero
  //
  // Output:
  //   rax, ==  0  -  success
  //   rax, == -1  -  need to call System.arraycopy
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry,
                               address short_copy_entry,
                               address int_copy_entry,
                               address long_copy_entry) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    const Register from  = rax;  // source array address
    const Register to    = rdx;  // destination array address
    const Register count = rcx;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    Address  from_arg(rsp, 12+ 4);      // from
    Address    to_arg(rsp, 12+ 8);      // to
    Address count_arg(rsp, 12+12);      // byte count

    // Load up:
    __ movptr(from ,  from_arg);
    __ movptr(to   ,    to_arg);
    __ movl2ptr(count, count_arg);

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
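
    // Dispatch on the combined alignment of all three operands; in C terms
    // (a sketch):
    //   uintptr_t bits = (uintptr_t)from | (uintptr_t)to | (uintptr_t)count;
    //   if      ((bits & 7) == 0) copy as jlongs;
    //   else if ((bits & 3) == 0) copy as jints;
    //   else if ((bits & 1) == 0) copy as jshorts;
    //   else                      copy as jbytes;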
    const Register bits = rsi;
    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, count);

    __ testl(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testl(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testl(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

    __ BIND(L_short_aligned);
    __ shrptr(count, LogBytesPerShort); // size => short_count
    __ movl(count_arg, count);          // update 'count'
    __ jump(RuntimeAddress(short_copy_entry));

    __ BIND(L_int_aligned);
    __ shrptr(count, LogBytesPerInt);   // size => int_count
    __ movl(count_arg, count);          // update 'count'
    __ jump(RuntimeAddress(int_copy_entry));

    __ BIND(L_long_aligned);
    __ shrptr(count, LogBytesPerLong);  // size => qword_count
    __ movl(count_arg, count);          // update 'count'
    __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it.
    __ pop(rsi);
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }


  // Perform range checks on the proposed arraycopy.
  // Smashes src_pos and dst_pos.  (Uses them up for temps.)
  void arraycopy_range_checks(Register src,
                              Register src_pos,
                              Register dst,
                              Register dst_pos,
                              Address& length,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");
    const Register src_end = src_pos;   // source array end position
    const Register dst_end = dst_pos;   // destination array end position
    __ addl(src_end, length); // src_pos + length
    __ addl(dst_end, length); // dst_pos + length

    //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
    __ cmpl(src_end, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
    __ cmpl(dst_end, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }


  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //    4(rsp)    -  src oop
  //    8(rsp)    -  src_pos
  //    12(rsp)   -  dst oop
  //    16(rsp)   -  dst_pos
  //    20(rsp)   -  element count
  //
  //  Output:
  //    rax, ==  0   -  success
  //    rax, == -1^K -  failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address entry_jbyte_arraycopy,
                                address entry_jshort_arraycopy,
                                address entry_jint_arraycopy,
                                address entry_oop_arraycopy,
                                address entry_jlong_arraycopy,
                                address entry_checkcast_arraycopy) {
    Label L_failed, L_failed_0, L_objArray;

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
    __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    // Input values
    Address SRC     (rsp, 12+ 4);
    Address SRC_POS (rsp, 12+ 8);
    Address DST     (rsp, 12+12);
    Address DST_POS (rsp, 12+16);
    Address LENGTH  (rsp, 12+20);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
1693 // 1694 1695 const Register src = rax; // source array oop 1696 const Register src_pos = rsi; 1697 const Register dst = rdx; // destination array oop 1698 const Register dst_pos = rdi; 1699 const Register length = rcx; // transfer count 1700 1701 // if (src == NULL) return -1; 1702 __ movptr(src, SRC); // src oop 1703 __ testptr(src, src); 1704 __ jccb(Assembler::zero, L_failed_0); 1705 1706 // if (src_pos < 0) return -1; 1707 __ movl2ptr(src_pos, SRC_POS); // src_pos 1708 __ testl(src_pos, src_pos); 1709 __ jccb(Assembler::negative, L_failed_0); 1710 1711 // if (dst == NULL) return -1; 1712 __ movptr(dst, DST); // dst oop 1713 __ testptr(dst, dst); 1714 __ jccb(Assembler::zero, L_failed_0); 1715 1716 // if (dst_pos < 0) return -1; 1717 __ movl2ptr(dst_pos, DST_POS); // dst_pos 1718 __ testl(dst_pos, dst_pos); 1719 __ jccb(Assembler::negative, L_failed_0); 1720 1721 // if (length < 0) return -1; 1722 __ movl2ptr(length, LENGTH); // length 1723 __ testl(length, length); 1724 __ jccb(Assembler::negative, L_failed_0); 1725 1726 // if (src->klass() == NULL) return -1; 1727 Address src_klass_addr(src, oopDesc::klass_offset_in_bytes()); 1728 Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes()); 1729 const Register rcx_src_klass = rcx; // array klass 1730 __ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes())); 1731 1732 #ifdef ASSERT 1733 // assert(src->klass() != NULL); 1734 BLOCK_COMMENT("assert klasses not null"); 1735 { Label L1, L2; 1736 __ testptr(rcx_src_klass, rcx_src_klass); 1737 __ jccb(Assembler::notZero, L2); // it is broken if klass is NULL 1738 __ bind(L1); 1739 __ stop("broken null klass"); 1740 __ bind(L2); 1741 __ cmpptr(dst_klass_addr, (int32_t)NULL_WORD); 1742 __ jccb(Assembler::equal, L1); // this would be broken also 1743 BLOCK_COMMENT("assert done"); 1744 } 1745 #endif //ASSERT 1746 1747 // Load layout helper (32-bits) 1748 // 1749 // |array_tag| | header_size | element_type | |log2_element_size| 1750 // 32 30 24 16 8 2 0 1751 // 1752 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 1753 // 1754 1755 int lh_offset = in_bytes(Klass::layout_helper_offset()); 1756 Address src_klass_lh_addr(rcx_src_klass, lh_offset); 1757 1758 // Handle objArrays completely differently... 1759 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 1760 __ cmpl(src_klass_lh_addr, objArray_lh); 1761 __ jcc(Assembler::equal, L_objArray); 1762 1763 // if (src->klass() != dst->klass()) return -1; 1764 __ cmpptr(rcx_src_klass, dst_klass_addr); 1765 __ jccb(Assembler::notEqual, L_failed_0); 1766 1767 const Register rcx_lh = rcx; // layout helper 1768 assert(rcx_lh == rcx_src_klass, "known alias"); 1769 __ movl(rcx_lh, src_klass_lh_addr); 1770 1771 // if (!src->is_Array()) return -1; 1772 __ cmpl(rcx_lh, Klass::_lh_neutral_value); 1773 __ jcc(Assembler::greaterEqual, L_failed_0); // signed cmp 1774 1775 // At this point, it is known to be a typeArray (array_tag 0x3). 
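// Editor's sketch (hedged, not emitted stub code): the field extraction that
// the shrptr/andptr sequence below performs on the layout helper, written as
// plain C++ against the Klass constants named above:
//
//   int header_size_in_bytes = (lh >> Klass::_lh_header_size_shift)
//                              & Klass::_lh_header_size_mask;
//   int log2_element_size    = lh & Klass::_lh_log2_element_size_mask;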
1776 #ifdef ASSERT 1777 { Label L; 1778 __ cmpl(rcx_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 1779 __ jcc(Assembler::greaterEqual, L); // signed cmp 1780 __ stop("must be a primitive array"); 1781 __ bind(L); 1782 } 1783 #endif 1784 1785 assert_different_registers(src, src_pos, dst, dst_pos, rcx_lh); 1786 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); 1787 1788 // TypeArrayKlass 1789 // 1790 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 1791 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 1792 // 1793 const Register rsi_offset = rsi; // array offset 1794 const Register src_array = src; // src array offset 1795 const Register dst_array = dst; // dst array offset 1796 const Register rdi_elsize = rdi; // log2 element size 1797 1798 __ mov(rsi_offset, rcx_lh); 1799 __ shrptr(rsi_offset, Klass::_lh_header_size_shift); 1800 __ andptr(rsi_offset, Klass::_lh_header_size_mask); // array_offset 1801 __ addptr(src_array, rsi_offset); // src array offset 1802 __ addptr(dst_array, rsi_offset); // dst array offset 1803 __ andptr(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize 1804 1805 // next registers should be set before the jump to corresponding stub 1806 const Register from = src; // source array address 1807 const Register to = dst; // destination array address 1808 const Register count = rcx; // elements count 1809 // some of them should be duplicated on stack 1810 #define FROM Address(rsp, 12+ 4) 1811 #define TO Address(rsp, 12+ 8) // Not used now 1812 #define COUNT Address(rsp, 12+12) // Only for oop arraycopy 1813 1814 BLOCK_COMMENT("scale indexes to element size"); 1815 __ movl2ptr(rsi, SRC_POS); // src_pos 1816 __ shlptr(rsi); // src_pos << rcx (log2 elsize) 1817 assert(src_array == from, ""); 1818 __ addptr(from, rsi); // from = src_array + SRC_POS << log2 elsize 1819 __ movl2ptr(rdi, DST_POS); // dst_pos 1820 __ shlptr(rdi); // dst_pos << rcx (log2 elsize) 1821 assert(dst_array == to, ""); 1822 __ addptr(to, rdi); // to = dst_array + DST_POS << log2 elsize 1823 __ movptr(FROM, from); // src_addr 1824 __ mov(rdi_elsize, rcx_lh); // log2 elsize 1825 __ movl2ptr(count, LENGTH); // elements count 1826 1827 BLOCK_COMMENT("choose copy loop based on element size"); 1828 __ cmpl(rdi_elsize, 0); 1829 1830 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jbyte_arraycopy)); 1831 __ cmpl(rdi_elsize, LogBytesPerShort); 1832 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jshort_arraycopy)); 1833 __ cmpl(rdi_elsize, LogBytesPerInt); 1834 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jint_arraycopy)); 1835 #ifdef ASSERT 1836 __ cmpl(rdi_elsize, LogBytesPerLong); 1837 __ jccb(Assembler::notEqual, L_failed); 1838 #endif 1839 __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it. 
1840 __ pop(rsi); 1841 __ jump(RuntimeAddress(entry_jlong_arraycopy)); 1842 1843 __ BIND(L_failed); 1844 __ xorptr(rax, rax); 1845 __ notptr(rax); // return -1 1846 __ pop(rdi); 1847 __ pop(rsi); 1848 __ leave(); // required for proper stackwalking of RuntimeStub frame 1849 __ ret(0); 1850 1851 // ObjArrayKlass 1852 __ BIND(L_objArray); 1853 // live at this point: rcx_src_klass, src[_pos], dst[_pos] 1854 1855 Label L_plain_copy, L_checkcast_copy; 1856 // test array classes for subtyping 1857 __ cmpptr(rcx_src_klass, dst_klass_addr); // usual case is exact equality 1858 __ jccb(Assembler::notEqual, L_checkcast_copy); 1859 1860 // Identically typed arrays can be copied without element-wise checks. 1861 assert_different_registers(src, src_pos, dst, dst_pos, rcx_src_klass); 1862 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); 1863 1864 __ BIND(L_plain_copy); 1865 __ movl2ptr(count, LENGTH); // elements count 1866 __ movl2ptr(src_pos, SRC_POS); // reload src_pos 1867 __ lea(from, Address(src, src_pos, Address::times_ptr, 1868 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 1869 __ movl2ptr(dst_pos, DST_POS); // reload dst_pos 1870 __ lea(to, Address(dst, dst_pos, Address::times_ptr, 1871 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 1872 __ movptr(FROM, from); // src_addr 1873 __ movptr(TO, to); // dst_addr 1874 __ movl(COUNT, count); // count 1875 __ jump(RuntimeAddress(entry_oop_arraycopy)); 1876 1877 __ BIND(L_checkcast_copy); 1878 // live at this point: rcx_src_klass, dst[_pos], src[_pos] 1879 { 1880 // Handy offsets: 1881 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 1882 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 1883 1884 Register rsi_dst_klass = rsi; 1885 Register rdi_temp = rdi; 1886 assert(rsi_dst_klass == src_pos, "expected alias w/ src_pos"); 1887 assert(rdi_temp == dst_pos, "expected alias w/ dst_pos"); 1888 Address dst_klass_lh_addr(rsi_dst_klass, lh_offset); 1889 1890 // Before looking at dst.length, make sure dst is also an objArray. 1891 __ movptr(rsi_dst_klass, dst_klass_addr); 1892 __ cmpl(dst_klass_lh_addr, objArray_lh); 1893 __ jccb(Assembler::notEqual, L_failed); 1894 1895 // It is safe to examine both src.length and dst.length. 1896 __ movl2ptr(src_pos, SRC_POS); // reload rsi 1897 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); 1898 // (Now src_pos and dst_pos are killed, but not src and dst.) 1899 1900 // We'll need this temp (don't forget to pop it after the type check). 1901 __ push(rbx); 1902 Register rbx_src_klass = rbx; 1903 1904 __ mov(rbx_src_klass, rcx_src_klass); // spill away from rcx 1905 __ movptr(rsi_dst_klass, dst_klass_addr); 1906 Address super_check_offset_addr(rsi_dst_klass, sco_offset); 1907 Label L_fail_array_check; 1908 generate_type_check(rbx_src_klass, 1909 super_check_offset_addr, dst_klass_addr, 1910 rdi_temp, NULL, &L_fail_array_check); 1911 // (On fall-through, we have passed the array type check.) 1912 __ pop(rbx); 1913 __ jmp(L_plain_copy); 1914 1915 __ BIND(L_fail_array_check); 1916 // Reshuffle arguments so we can call checkcast_arraycopy: 1917 1918 // match initial saves for checkcast_arraycopy 1919 // push(rsi); // already done; see above 1920 // push(rdi); // already done; see above 1921 // push(rbx); // already done; see above 1922 1923 // Marshal outgoing arguments now, freeing registers. 
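// The five stack slots filled in below must match the incoming frame of
// checkcast_arraycopy exactly. As a hedged C-level view of that contract
// (parameter names are illustrative; the stub is entered by a tail jump,
// not a call):
//
//   int checkcast_arraycopy(void* from, void* to, int length,
//                           int super_check_offset, void* super_klass);
//   // returns 0 on success, or -1^K if only the first K elements copied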
1924 Address from_arg(rsp, 16+ 4); // from 1925 Address to_arg(rsp, 16+ 8); // to 1926 Address length_arg(rsp, 16+12); // elements count 1927 Address ckoff_arg(rsp, 16+16); // super_check_offset 1928 Address ckval_arg(rsp, 16+20); // super_klass 1929 1930 Address SRC_POS_arg(rsp, 16+ 8); 1931 Address DST_POS_arg(rsp, 16+16); 1932 Address LENGTH_arg(rsp, 16+20); 1933 // push rbx, changed the incoming offsets (why not just use rbp,??) 1934 // assert(SRC_POS_arg.disp() == SRC_POS.disp() + 4, ""); 1935 1936 __ movptr(rbx, Address(rsi_dst_klass, ek_offset)); 1937 __ movl2ptr(length, LENGTH_arg); // reload elements count 1938 __ movl2ptr(src_pos, SRC_POS_arg); // reload src_pos 1939 __ movl2ptr(dst_pos, DST_POS_arg); // reload dst_pos 1940 1941 __ movptr(ckval_arg, rbx); // destination element type 1942 __ movl(rbx, Address(rbx, sco_offset)); 1943 __ movl(ckoff_arg, rbx); // corresponding class check offset 1944 1945 __ movl(length_arg, length); // outgoing length argument 1946 1947 __ lea(from, Address(src, src_pos, Address::times_ptr, 1948 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 1949 __ movptr(from_arg, from); 1950 1951 __ lea(to, Address(dst, dst_pos, Address::times_ptr, 1952 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 1953 __ movptr(to_arg, to); 1954 __ jump(RuntimeAddress(entry_checkcast_arraycopy)); 1955 } 1956 1957 return start; 1958 } 1959 1960 void generate_arraycopy_stubs() { 1961 address entry; 1962 address entry_jbyte_arraycopy; 1963 address entry_jshort_arraycopy; 1964 address entry_jint_arraycopy; 1965 address entry_oop_arraycopy; 1966 address entry_jlong_arraycopy; 1967 address entry_checkcast_arraycopy; 1968 1969 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = 1970 generate_disjoint_copy(T_BYTE, true, Address::times_1, &entry, 1971 "arrayof_jbyte_disjoint_arraycopy"); 1972 StubRoutines::_arrayof_jbyte_arraycopy = 1973 generate_conjoint_copy(T_BYTE, true, Address::times_1, entry, 1974 NULL, "arrayof_jbyte_arraycopy"); 1975 StubRoutines::_jbyte_disjoint_arraycopy = 1976 generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry, 1977 "jbyte_disjoint_arraycopy"); 1978 StubRoutines::_jbyte_arraycopy = 1979 generate_conjoint_copy(T_BYTE, false, Address::times_1, entry, 1980 &entry_jbyte_arraycopy, "jbyte_arraycopy"); 1981 1982 StubRoutines::_arrayof_jshort_disjoint_arraycopy = 1983 generate_disjoint_copy(T_SHORT, true, Address::times_2, &entry, 1984 "arrayof_jshort_disjoint_arraycopy"); 1985 StubRoutines::_arrayof_jshort_arraycopy = 1986 generate_conjoint_copy(T_SHORT, true, Address::times_2, entry, 1987 NULL, "arrayof_jshort_arraycopy"); 1988 StubRoutines::_jshort_disjoint_arraycopy = 1989 generate_disjoint_copy(T_SHORT, false, Address::times_2, &entry, 1990 "jshort_disjoint_arraycopy"); 1991 StubRoutines::_jshort_arraycopy = 1992 generate_conjoint_copy(T_SHORT, false, Address::times_2, entry, 1993 &entry_jshort_arraycopy, "jshort_arraycopy"); 1994 1995 // Next arrays are always aligned on 4 bytes at least. 
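// A note on the '&entry' plumbing used by the registrations above and
// below, as a hedged C sketch (hypothetical names): each disjoint generator
// reports its no-overlap entry point through '&entry', and the matching
// conjoint stub branches there whenever the ranges cannot overlap
// backwards, copying downward itself only in the overlapping case.
//
//   void conjoint_copy_sketch(const char* from, char* to, size_t count,
//                             void (*disjoint_entry)(const char*, char*, size_t)) {
//     if (to <= from || to >= from + count) {
//       disjoint_entry(from, to, count);             // forward copy is safe
//     } else {
//       while (count-- > 0) to[count] = from[count]; // overlap: copy from the end
//     }
//   }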
1996 StubRoutines::_jint_disjoint_arraycopy = 1997 generate_disjoint_copy(T_INT, true, Address::times_4, &entry, 1998 "jint_disjoint_arraycopy"); 1999 StubRoutines::_jint_arraycopy = 2000 generate_conjoint_copy(T_INT, true, Address::times_4, entry, 2001 &entry_jint_arraycopy, "jint_arraycopy"); 2002 2003 StubRoutines::_oop_disjoint_arraycopy = 2004 generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry, 2005 "oop_disjoint_arraycopy"); 2006 StubRoutines::_oop_arraycopy = 2007 generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, 2008 &entry_oop_arraycopy, "oop_arraycopy"); 2009 2010 StubRoutines::_oop_disjoint_arraycopy_uninit = 2011 generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry, 2012 "oop_disjoint_arraycopy_uninit", 2013 /*dest_uninitialized*/true); 2014 StubRoutines::_oop_arraycopy_uninit = 2015 generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, 2016 NULL, "oop_arraycopy_uninit", 2017 /*dest_uninitialized*/true); 2018 2019 StubRoutines::_jlong_disjoint_arraycopy = 2020 generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy"); 2021 StubRoutines::_jlong_arraycopy = 2022 generate_conjoint_long_copy(entry, &entry_jlong_arraycopy, 2023 "jlong_arraycopy"); 2024 2025 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 2026 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 2027 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 2028 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 2029 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 2030 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 2031 2032 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 2033 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 2034 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 2035 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 2036 2037 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 2038 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 2039 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 2040 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 2041 2042 StubRoutines::_checkcast_arraycopy = 2043 generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 2044 StubRoutines::_checkcast_arraycopy_uninit = 2045 generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, /*dest_uninitialized*/true); 2046 2047 StubRoutines::_unsafe_arraycopy = 2048 generate_unsafe_copy("unsafe_arraycopy", 2049 entry_jbyte_arraycopy, 2050 entry_jshort_arraycopy, 2051 entry_jint_arraycopy, 2052 entry_jlong_arraycopy); 2053 2054 StubRoutines::_generic_arraycopy = 2055 generate_generic_copy("generic_arraycopy", 2056 entry_jbyte_arraycopy, 2057 entry_jshort_arraycopy, 2058 entry_jint_arraycopy, 2059 entry_oop_arraycopy, 2060 entry_jlong_arraycopy, 2061 entry_checkcast_arraycopy); 2062 } 2063 2064 // AES intrinsic stubs 2065 enum {AESBlockSize = 16}; 2066 2067 address generate_key_shuffle_mask() { 2068 __ align(16); 2069 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 2070 address start = __ pc(); 2071 __ emit_data(0x00010203, relocInfo::none, 0 ); 2072 __ emit_data(0x04050607, relocInfo::none, 0 ); 2073 __ 
emit_data(0x08090a0b, relocInfo::none, 0 ); 2074 __ emit_data(0x0c0d0e0f, relocInfo::none, 0 ); 2075 return start; 2076 } 2077 2078 address generate_counter_shuffle_mask() { 2079 __ align(16); 2080 StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask"); 2081 address start = __ pc(); 2082 __ emit_data(0x0c0d0e0f, relocInfo::none, 0); 2083 __ emit_data(0x08090a0b, relocInfo::none, 0); 2084 __ emit_data(0x04050607, relocInfo::none, 0); 2085 __ emit_data(0x00010203, relocInfo::none, 0); 2086 return start; 2087 } 2088 2089 // Utility routine for loading a 128-bit key word in little endian format 2090 // can optionally specify that the shuffle mask is already in an xmm register 2091 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 2092 __ movdqu(xmmdst, Address(key, offset)); 2093 if (xmm_shuf_mask != NULL) { 2094 __ pshufb(xmmdst, xmm_shuf_mask); 2095 } else { 2096 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2097 } 2098 } 2099 2100 // aesenc using specified key+offset 2101 // can optionally specify that the shuffle mask is already in an xmm register 2102 void aes_enc_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 2103 load_key(xmmtmp, key, offset, xmm_shuf_mask); 2104 __ aesenc(xmmdst, xmmtmp); 2105 } 2106 2107 // aesdec using specified key+offset 2108 // can optionally specify that the shuffle mask is already in an xmm register 2109 void aes_dec_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 2110 load_key(xmmtmp, key, offset, xmm_shuf_mask); 2111 __ aesdec(xmmdst, xmmtmp); 2112 } 2113 2114 // Utility routine for increasing the 128-bit counter (iv in CTR mode) 2115 // XMM_128bit, D3, D2, D1, D0 2116 void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) { 2117 __ pextrd(reg, xmmdst, 0x0); 2118 __ addl(reg, inc_delta); 2119 __ pinsrd(xmmdst, reg, 0x0); 2120 __ jcc(Assembler::carryClear, next_block); // jump if no carry 2121 2122 __ pextrd(reg, xmmdst, 0x01); // Carry-> D1 2123 __ addl(reg, 0x01); 2124 __ pinsrd(xmmdst, reg, 0x01); 2125 __ jcc(Assembler::carryClear, next_block); // jump if no carry 2126 2127 __ pextrd(reg, xmmdst, 0x02); // Carry-> D2 2128 __ addl(reg, 0x01); 2129 __ pinsrd(xmmdst, reg, 0x02); 2130 __ jcc(Assembler::carryClear, next_block); // jump if no carry 2131 2132 __ pextrd(reg, xmmdst, 0x03); // Carry -> D3 2133 __ addl(reg, 0x01); 2134 __ pinsrd(xmmdst, reg, 0x03); 2135 2136 __ BIND(next_block); // next instruction 2137 } 2138 2139 2140 // Arguments: 2141 // 2142 // Inputs: 2143 // c_rarg0 - source byte array address 2144 // c_rarg1 - destination byte array address 2145 // c_rarg2 - K (key) in little endian int array 2146 // 2147 address generate_aescrypt_encryptBlock() { 2148 assert(UseAES, "need AES instructions and misaligned SSE support"); 2149 __ align(CodeEntryAlignment); 2150 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 2151 Label L_doLast; 2152 address start = __ pc(); 2153 2154 const Register from = rdx; // source array address 2155 const Register to = rdx; // destination array address 2156 const Register key = rcx; // key array address 2157 const Register keylen = rax; 2158 const Address from_param(rbp, 8+0); 2159 const Address to_param (rbp, 8+4); 2160 const Address key_param (rbp, 8+8); 2161 2162 const XMMRegister xmm_result = xmm0; 2163 const XMMRegister xmm_key_shuf_mask = xmm1; 2164 const XMMRegister xmm_temp1 =
xmm2; 2165 const XMMRegister xmm_temp2 = xmm3; 2166 const XMMRegister xmm_temp3 = xmm4; 2167 const XMMRegister xmm_temp4 = xmm5; 2168 2169 __ enter(); // required for proper stackwalking of RuntimeStub frame 2170 2171 __ movptr(from, from_param); 2172 __ movptr(key, key_param); 2173 2174 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 2175 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2176 2177 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2178 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 2179 __ movptr(to, to_param); 2180 2181 // For encryption, the java expanded key ordering is just what we need 2182 2183 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 2184 __ pxor(xmm_result, xmm_temp1); 2185 2186 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 2187 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 2188 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 2189 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 2190 2191 __ aesenc(xmm_result, xmm_temp1); 2192 __ aesenc(xmm_result, xmm_temp2); 2193 __ aesenc(xmm_result, xmm_temp3); 2194 __ aesenc(xmm_result, xmm_temp4); 2195 2196 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 2197 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 2198 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 2199 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 2200 2201 __ aesenc(xmm_result, xmm_temp1); 2202 __ aesenc(xmm_result, xmm_temp2); 2203 __ aesenc(xmm_result, xmm_temp3); 2204 __ aesenc(xmm_result, xmm_temp4); 2205 2206 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 2207 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 2208 2209 __ cmpl(keylen, 44); 2210 __ jccb(Assembler::equal, L_doLast); 2211 2212 __ aesenc(xmm_result, xmm_temp1); 2213 __ aesenc(xmm_result, xmm_temp2); 2214 2215 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 2216 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 2217 2218 __ cmpl(keylen, 52); 2219 __ jccb(Assembler::equal, L_doLast); 2220 2221 __ aesenc(xmm_result, xmm_temp1); 2222 __ aesenc(xmm_result, xmm_temp2); 2223 2224 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 2225 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 2226 2227 __ BIND(L_doLast); 2228 __ aesenc(xmm_result, xmm_temp1); 2229 __ aesenclast(xmm_result, xmm_temp2); 2230 __ movdqu(Address(to, 0), xmm_result); // store the result 2231 __ xorptr(rax, rax); // return 0 2232 __ leave(); // required for proper stackwalking of RuntimeStub frame 2233 __ ret(0); 2234 2235 return start; 2236 } 2237 2238 2239 // Arguments: 2240 // 2241 // Inputs: 2242 // c_rarg0 - source byte array address 2243 // c_rarg1 - destination byte array address 2244 // c_rarg2 - K (key) in little endian int array 2245 // 2246 address generate_aescrypt_decryptBlock() { 2247 assert(UseAES, "need AES instructions and misaligned SSE support"); 2248 __ align(CodeEntryAlignment); 2249 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 2250 Label L_doLast; 2251 address start = __ pc(); 2252 2253 const Register from = rdx; // source array address 2254 const Register to = rdx; // destination array address 2255 const Register key = rcx; // key array address 2256 const Register keylen = rax; 2257 const Address from_param(rbp, 8+0); 2258 const Address to_param (rbp, 8+4); 2259 const Address key_param (rbp, 8+8); 2260 2261 const XMMRegister xmm_result = xmm0; 2262 const XMMRegister xmm_key_shuf_mask = xmm1; 2263 const XMMRegister 
xmm_temp1 = xmm2; 2264 const XMMRegister xmm_temp2 = xmm3; 2265 const XMMRegister xmm_temp3 = xmm4; 2266 const XMMRegister xmm_temp4 = xmm5; 2267 2268 __ enter(); // required for proper stackwalking of RuntimeStub frame 2269 2270 __ movptr(from, from_param); 2271 __ movptr(key, key_param); 2272 2273 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 2274 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2275 2276 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2277 __ movdqu(xmm_result, Address(from, 0)); 2278 __ movptr(to, to_param); 2279 2280 // for decryption java expanded key ordering is rotated one position from what we want 2281 // so we start from 0x10 here and hit 0x00 last 2282 // we don't know if the key is aligned, hence not using load-execute form 2283 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 2284 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 2285 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 2286 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 2287 2288 __ pxor (xmm_result, xmm_temp1); 2289 __ aesdec(xmm_result, xmm_temp2); 2290 __ aesdec(xmm_result, xmm_temp3); 2291 __ aesdec(xmm_result, xmm_temp4); 2292 2293 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 2294 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 2295 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 2296 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 2297 2298 __ aesdec(xmm_result, xmm_temp1); 2299 __ aesdec(xmm_result, xmm_temp2); 2300 __ aesdec(xmm_result, xmm_temp3); 2301 __ aesdec(xmm_result, xmm_temp4); 2302 2303 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 2304 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 2305 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 2306 2307 __ cmpl(keylen, 44); 2308 __ jccb(Assembler::equal, L_doLast); 2309 2310 __ aesdec(xmm_result, xmm_temp1); 2311 __ aesdec(xmm_result, xmm_temp2); 2312 2313 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 2314 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 2315 2316 __ cmpl(keylen, 52); 2317 __ jccb(Assembler::equal, L_doLast); 2318 2319 __ aesdec(xmm_result, xmm_temp1); 2320 __ aesdec(xmm_result, xmm_temp2); 2321 2322 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 2323 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 2324 2325 __ BIND(L_doLast); 2326 __ aesdec(xmm_result, xmm_temp1); 2327 __ aesdec(xmm_result, xmm_temp2); 2328 2329 // for decryption the aesdeclast operation is always on key+0x00 2330 __ aesdeclast(xmm_result, xmm_temp3); 2331 __ movdqu(Address(to, 0), xmm_result); // store the result 2332 __ xorptr(rax, rax); // return 0 2333 __ leave(); // required for proper stackwalking of RuntimeStub frame 2334 __ ret(0); 2335 2336 return start; 2337 } 2338 2339 void handleSOERegisters(bool saving) { 2340 const int saveFrameSizeInBytes = 4 * wordSize; 2341 const Address saved_rbx (rbp, -3 * wordSize); 2342 const Address saved_rsi (rbp, -2 * wordSize); 2343 const Address saved_rdi (rbp, -1 * wordSize); 2344 2345 if (saving) { 2346 __ subptr(rsp, saveFrameSizeInBytes); 2347 __ movptr(saved_rsi, rsi); 2348 __ movptr(saved_rdi, rdi); 2349 __ movptr(saved_rbx, rbx); 2350 } else { 2351 // restoring 2352 __ movptr(rsi, saved_rsi); 2353 __ movptr(rdi, saved_rdi); 2354 __ movptr(rbx, saved_rbx); 2355 } 2356 } 2357 2358 // Arguments: 2359 // 2360 // Inputs: 2361 // c_rarg0 - source byte array address 2362 // c_rarg1 - destination byte array address 2363 // c_rarg2 - K (key) in 
little endian int array 2364 // c_rarg3 - r vector byte array address 2365 // c_rarg4 - input length 2366 // 2367 // Output: 2368 // rax - input length 2369 // 2370 address generate_cipherBlockChaining_encryptAESCrypt() { 2371 assert(UseAES, "need AES instructions and misaligned SSE support"); 2372 __ align(CodeEntryAlignment); 2373 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 2374 address start = __ pc(); 2375 2376 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 2377 const Register from = rsi; // source array address 2378 const Register to = rdx; // destination array address 2379 const Register key = rcx; // key array address 2380 const Register rvec = rdi; // r byte array initialized from initvector array address 2381 // and left with the results of the last encryption block 2382 const Register len_reg = rbx; // src len (must be multiple of blocksize 16) 2383 const Register pos = rax; 2384 2385 // xmm register assignments for the loops below 2386 const XMMRegister xmm_result = xmm0; 2387 const XMMRegister xmm_temp = xmm1; 2388 // first 6 keys preloaded into xmm2-xmm7 2389 const int XMM_REG_NUM_KEY_FIRST = 2; 2390 const int XMM_REG_NUM_KEY_LAST = 7; 2391 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 2392 2393 __ enter(); // required for proper stackwalking of RuntimeStub frame 2394 handleSOERegisters(true /*saving*/); 2395 2396 // load registers from incoming parameters 2397 const Address from_param(rbp, 8+0); 2398 const Address to_param (rbp, 8+4); 2399 const Address key_param (rbp, 8+8); 2400 const Address rvec_param (rbp, 8+12); 2401 const Address len_param (rbp, 8+16); 2402 __ movptr(from , from_param); 2403 __ movptr(to , to_param); 2404 __ movptr(key , key_param); 2405 __ movptr(rvec , rvec_param); 2406 __ movptr(len_reg , len_param); 2407 2408 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 2409 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2410 // load up xmm regs 2 thru 7 with keys 0-5 2411 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2412 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 2413 offset += 0x10; 2414 } 2415 2416 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 2417 2418 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 2419 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2420 __ cmpl(rax, 44); 2421 __ jcc(Assembler::notEqual, L_key_192_256); 2422 2423 // 128 bit code follows here 2424 __ movl(pos, 0); 2425 __ align(OptoLoopAlignment); 2426 __ BIND(L_loopTop_128); 2427 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 2428 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2429 2430 __ pxor (xmm_result, xmm_key0); // do the aes rounds 2431 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2432 __ aesenc(xmm_result, as_XMMRegister(rnum)); 2433 } 2434 for (int key_offset = 0x60; key_offset <= 0x90; key_offset += 0x10) { 2435 aes_enc_key(xmm_result, xmm_temp, key, key_offset); 2436 } 2437 load_key(xmm_temp, key, 0xa0); 2438 __ aesenclast(xmm_result, xmm_temp); 2439 2440 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2441 // no 
need to store r to memory until we exit 2442 __ addptr(pos, AESBlockSize); 2443 __ subptr(len_reg, AESBlockSize); 2444 __ jcc(Assembler::notEqual, L_loopTop_128); 2445 2446 __ BIND(L_exit); 2447 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 2448 2449 handleSOERegisters(false /*restoring*/); 2450 __ movptr(rax, len_param); // return length 2451 __ leave(); // required for proper stackwalking of RuntimeStub frame 2452 __ ret(0); 2453 2454 __ BIND(L_key_192_256); 2455 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 2456 __ cmpl(rax, 52); 2457 __ jcc(Assembler::notEqual, L_key_256); 2458 2459 // 192-bit code follows here (could be changed to use more xmm registers) 2460 __ movl(pos, 0); 2461 __ align(OptoLoopAlignment); 2462 __ BIND(L_loopTop_192); 2463 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 2464 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2465 2466 __ pxor (xmm_result, xmm_key0); // do the aes rounds 2467 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2468 __ aesenc(xmm_result, as_XMMRegister(rnum)); 2469 } 2470 for (int key_offset = 0x60; key_offset <= 0xb0; key_offset += 0x10) { 2471 aes_enc_key(xmm_result, xmm_temp, key, key_offset); 2472 } 2473 load_key(xmm_temp, key, 0xc0); 2474 __ aesenclast(xmm_result, xmm_temp); 2475 2476 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2477 // no need to store r to memory until we exit 2478 __ addptr(pos, AESBlockSize); 2479 __ subptr(len_reg, AESBlockSize); 2480 __ jcc(Assembler::notEqual, L_loopTop_192); 2481 __ jmp(L_exit); 2482 2483 __ BIND(L_key_256); 2484 // 256-bit code follows here (could be changed to use more xmm registers) 2485 __ movl(pos, 0); 2486 __ align(OptoLoopAlignment); 2487 __ BIND(L_loopTop_256); 2488 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 2489 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2490 2491 __ pxor (xmm_result, xmm_key0); // do the aes rounds 2492 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2493 __ aesenc(xmm_result, as_XMMRegister(rnum)); 2494 } 2495 for (int key_offset = 0x60; key_offset <= 0xd0; key_offset += 0x10) { 2496 aes_enc_key(xmm_result, xmm_temp, key, key_offset); 2497 } 2498 load_key(xmm_temp, key, 0xe0); 2499 __ aesenclast(xmm_result, xmm_temp); 2500 2501 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2502 // no need to store r to memory until we exit 2503 __ addptr(pos, AESBlockSize); 2504 __ subptr(len_reg, AESBlockSize); 2505 __ jcc(Assembler::notEqual, L_loopTop_256); 2506 __ jmp(L_exit); 2507 2508 return start; 2509 } 2510 2511 2512 // CBC AES Decryption. 2513 // In 32-bit stub, because of lack of registers we do not try to parallelize 8 blocks at a time.
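// Editor's note (hedged): unlike CBC encryption, decryption still admits
// four-way parallelism because every output block depends only on ciphertext
// that is already in memory; in scalar form ('aes_dec_block' is a
// hypothetical one-block primitive):
//
//   for (int i = 0; i < n; i++)
//     P[i] = aes_dec_block(C[i], key) ^ (i == 0 ? IV : C[i - 1]);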
2514 // 2515 // Arguments: 2516 // 2517 // Inputs: 2518 // c_rarg0 - source byte array address 2519 // c_rarg1 - destination byte array address 2520 // c_rarg2 - K (key) in little endian int array 2521 // c_rarg3 - r vector byte array address 2522 // c_rarg4 - input length 2523 // 2524 // Output: 2525 // rax - input length 2526 // 2527 2528 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 2529 assert(UseAES, "need AES instructions and misaligned SSE support"); 2530 __ align(CodeEntryAlignment); 2531 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 2532 address start = __ pc(); 2533 2534 const Register from = rsi; // source array address 2535 const Register to = rdx; // destination array address 2536 const Register key = rcx; // key array address 2537 const Register rvec = rdi; // r byte array initialized from initvector array address 2538 // and left with the results of the last encryption block 2539 const Register len_reg = rbx; // src len (must be multiple of blocksize 16) 2540 const Register pos = rax; 2541 2542 const int PARALLEL_FACTOR = 4; 2543 const int ROUNDS[3] = { 10, 12, 14 }; //aes rounds for key128, key192, key256 2544 2545 Label L_exit; 2546 Label L_singleBlock_loopTop[3]; //128, 192, 256 2547 Label L_multiBlock_loopTop[3]; //128, 192, 256 2548 2549 const XMMRegister xmm_prev_block_cipher = xmm0; // holds cipher of previous block 2550 const XMMRegister xmm_key_shuf_mask = xmm1; 2551 2552 const XMMRegister xmm_key_tmp0 = xmm2; 2553 const XMMRegister xmm_key_tmp1 = xmm3; 2554 2555 // registers holding the four results in the parallelized loop 2556 const XMMRegister xmm_result0 = xmm4; 2557 const XMMRegister xmm_result1 = xmm5; 2558 const XMMRegister xmm_result2 = xmm6; 2559 const XMMRegister xmm_result3 = xmm7; 2560 2561 __ enter(); // required for proper stackwalking of RuntimeStub frame 2562 handleSOERegisters(true /*saving*/); 2563 2564 // load registers from incoming parameters 2565 const Address from_param(rbp, 8+0); 2566 const Address to_param (rbp, 8+4); 2567 const Address key_param (rbp, 8+8); 2568 const Address rvec_param (rbp, 8+12); 2569 const Address len_param (rbp, 8+16); 2570 2571 __ movptr(from , from_param); 2572 __ movptr(to , to_param); 2573 __ movptr(key , key_param); 2574 __ movptr(rvec , rvec_param); 2575 __ movptr(len_reg , len_param); 2576 2577 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2578 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 2579 2580 __ xorptr(pos, pos); 2581 2582 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 2583 // rvec is reused 2584 __ movl(rvec, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2585 __ cmpl(rvec, 52); 2586 __ jcc(Assembler::equal, L_multiBlock_loopTop[1]); 2587 __ cmpl(rvec, 60); 2588 __ jcc(Assembler::equal, L_multiBlock_loopTop[2]); 2589 2590 #define DoFour(opc, src_reg) \ 2591 __ opc(xmm_result0, src_reg); \ 2592 __ opc(xmm_result1, src_reg); \ 2593 __ opc(xmm_result2, src_reg); \ 2594 __ opc(xmm_result3, src_reg); \ 2595 2596 for (int k = 0; k < 3; ++k) { 2597 __ align(OptoLoopAlignment); 2598 __ BIND(L_multiBlock_loopTop[k]); 2599 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 2600 __ jcc(Assembler::less, L_singleBlock_loopTop[k]); 2601 2602 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next
4 blocks into xmm_result registers 2603 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 2604 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 2605 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); 2606 2607 // the java expanded key ordering is rotated one position from what we want 2608 // so we start from 0x10 here and hit 0x00 last 2609 load_key(xmm_key_tmp0, key, 0x10, xmm_key_shuf_mask); 2610 DoFour(pxor, xmm_key_tmp0); //xor with first key 2611 // do the aes dec rounds 2612 for (int rnum = 1; rnum <= ROUNDS[k];) { 2613 //load two keys at a time 2614 //k1->0x20, ..., k9->0xa0, k10->0x00 2615 load_key(xmm_key_tmp1, key, (rnum + 1) * 0x10, xmm_key_shuf_mask); 2616 load_key(xmm_key_tmp0, key, ((rnum + 2) % (ROUNDS[k] + 1)) * 0x10, xmm_key_shuf_mask); // hit 0x00 last! 2617 DoFour(aesdec, xmm_key_tmp1); 2618 rnum++; 2619 if (rnum != ROUNDS[k]) { 2620 DoFour(aesdec, xmm_key_tmp0); 2621 } 2622 else { 2623 DoFour(aesdeclast, xmm_key_tmp0); 2624 } 2625 rnum++; 2626 } 2627 2628 // for each result, xor with the r vector of previous cipher block 2629 __ pxor(xmm_result0, xmm_prev_block_cipher); 2630 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 2631 __ pxor(xmm_result1, xmm_prev_block_cipher); 2632 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 2633 __ pxor(xmm_result2, xmm_prev_block_cipher); 2634 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 2635 __ pxor(xmm_result3, xmm_prev_block_cipher); 2636 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 2637 2638 // store 4 results into the next 64 bytes of output 2639 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 2640 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 2641 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 2642 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 2643 2644 __ addptr(pos, 4 * AESBlockSize); 2645 __ subptr(len_reg, 4 * AESBlockSize); 2646 __ jmp(L_multiBlock_loopTop[k]); 2647 2648 //singleBlock starts here 2649 __ align(OptoLoopAlignment); 2650 __ BIND(L_singleBlock_loopTop[k]); 2651 __ cmpptr(len_reg, 0); // any blocks left?
2652 __ jcc(Assembler::equal, L_exit); 2653 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 2654 __ movdqa(xmm_result1, xmm_result0); 2655 2656 load_key(xmm_key_tmp0, key, 0x10, xmm_key_shuf_mask); 2657 __ pxor(xmm_result0, xmm_key_tmp0); 2658 // do the aes dec rounds 2659 for (int rnum = 1; rnum < ROUNDS[k]; rnum++) { 2660 // the java expanded key ordering is rotated one position from what we want 2661 load_key(xmm_key_tmp0, key, (rnum + 1) * 0x10, xmm_key_shuf_mask); 2662 __ aesdec(xmm_result0, xmm_key_tmp0); 2663 } 2664 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask); 2665 __ aesdeclast(xmm_result0, xmm_key_tmp0); 2666 __ pxor(xmm_result0, xmm_prev_block_cipher); // xor with the current r vector 2667 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result0); // store into the next 16 bytes of output 2668 // no need to store r to memory until we exit 2669 __ movdqa(xmm_prev_block_cipher, xmm_result1); // set up next r vector with cipher input from this block 2670 2671 __ addptr(pos, AESBlockSize); 2672 __ subptr(len_reg, AESBlockSize); 2673 __ jmp(L_singleBlock_loopTop[k]); 2674 }//for 128/192/256 2675 2676 __ BIND(L_exit); 2677 __ movptr(rvec, rvec_param); // restore this since reused earlier 2678 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object 2679 handleSOERegisters(false /*restoring*/); 2680 __ movptr(rax, len_param); // return length 2681 __ leave(); // required for proper stackwalking of RuntimeStub frame 2682 __ ret(0); 2683 2684 return start; 2685 } 2686 2687 // CTR AES crypt. 2688 // In 32-bit stub, parallelize 4 blocks at a time 2689 // Arguments: 2690 // 2691 // Inputs: 2692 // c_rarg0 - source byte array address 2693 // c_rarg1 - destination byte array address 2694 // c_rarg2 - K (key) in little endian int array 2695 // c_rarg3 - counter vector byte array address 2696 // c_rarg4 - input length 2697 // 2698 // Output: 2699 // rax - input length 2700 // 2701 address generate_counterMode_AESCrypt_Parallel() { 2702 assert(UseAES, "need AES instructions and misaligned SSE support"); 2703 __ align(CodeEntryAlignment); 2704 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 2705 address start = __ pc(); 2706 const Register from = rsi; // source array address 2707 const Register to = rdx; // destination array address 2708 const Register key = rcx; // key array address 2709 const Register counter = rdi; // counter byte array initialized from initvector array address 2710 // and updated with the incremented counter in the end 2711 const Register len_reg = rbx; 2712 const Register pos = rax; 2713 2714 __ enter(); // required for proper stackwalking of RuntimeStub frame 2715 handleSOERegisters(true /*saving*/); // save rbx, rsi, rdi 2716 2717 // load registers from incoming parameters 2718 const Address from_param(rbp, 8+0); 2719 const Address to_param (rbp, 8+4); 2720 const Address key_param (rbp, 8+8); 2721 const Address rvec_param (rbp, 8+12); 2722 const Address len_param (rbp, 8+16); 2723 const Address saved_counter_param(rbp, 8 + 20); 2724 const Address used_addr_param(rbp, 8 + 24); 2725 2726 __ movptr(from , from_param); 2727 __ movptr(to , to_param); 2728 __ movptr(len_reg , len_param); 2729 2730 // Use the partially used encrypted counter from last invocation 2731 Label L_exit_preLoop, L_preLoop_start; 2732 2733 // Use the registers 'counter' and 'key' here in this preloop 2734 // to hold the last 2 params 'used' and
'saved_encCounter_start' 2735 Register used = counter; 2736 Register saved_encCounter_start = key; 2737 Register used_addr = saved_encCounter_start; 2738 2739 __ movptr(used_addr, used_addr_param); 2740 __ movptr(used, Address(used_addr, 0)); 2741 __ movptr(saved_encCounter_start, saved_counter_param); 2742 2743 __ BIND(L_preLoop_start); 2744 __ cmpptr(used, 16); 2745 __ jcc(Assembler::aboveEqual, L_exit_preLoop); 2746 __ cmpptr(len_reg, 0); 2747 __ jcc(Assembler::lessEqual, L_exit_preLoop); 2748 __ movb(rax, Address(saved_encCounter_start, used)); 2749 __ xorb(rax, Address(from, 0)); 2750 __ movb(Address(to, 0), rax); 2751 __ addptr(from, 1); 2752 __ addptr(to, 1); 2753 __ addptr(used, 1); 2754 __ subptr(len_reg, 1); 2755 2756 __ jmp(L_preLoop_start); 2757 2758 __ BIND(L_exit_preLoop); 2759 __ movptr(used_addr, used_addr_param); 2761 __ movl(Address(used_addr, 0), used); 2762 2763 // load the parameters 'key' and 'counter' 2764 __ movptr(key, key_param); 2765 __ movptr(counter, rvec_param); 2766 2767 // xmm register assignments for the loops below 2768 const XMMRegister xmm_curr_counter = xmm0; 2769 const XMMRegister xmm_counter_shuf_mask = xmm1; // need to be reloaded 2770 const XMMRegister xmm_key_shuf_mask = xmm2; // need to be reloaded 2771 const XMMRegister xmm_key = xmm3; 2772 const XMMRegister xmm_result0 = xmm4; 2773 const XMMRegister xmm_result1 = xmm5; 2774 const XMMRegister xmm_result2 = xmm6; 2775 const XMMRegister xmm_result3 = xmm7; 2776 const XMMRegister xmm_from0 = xmm1; //reuse XMM register 2777 const XMMRegister xmm_from1 = xmm2; 2778 const XMMRegister xmm_from2 = xmm3; 2779 const XMMRegister xmm_from3 = xmm4; 2780 2781 //for key_128, key_192, key_256 2782 const int rounds[3] = {10, 12, 14}; 2783 Label L_singleBlockLoopTop[3]; 2784 Label L_multiBlock_loopTop[3]; 2785 Label L_key192_top, L_key256_top; 2786 Label L_incCounter[3][4]; // 3: different key length, 4: 4 blocks at a time 2787 Label L_incCounter_single[3]; //for single block, key128, key192, key256 2788 Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3]; 2789 Label L_processTail_extr[3], L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3]; 2790 2791 Label L_exit; 2792 const int PARALLEL_FACTOR = 4; //because of the limited register number 2793 2794 // initialize counter with initial counter 2795 __ movdqu(xmm_curr_counter, Address(counter, 0x00)); 2796 __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr())); 2797 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled for increase 2798 2799 // key length could be only {11, 13, 15} * 4 = {44, 52, 60} 2800 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2801 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2802 __ cmpl(rax, 52); 2803 __ jcc(Assembler::equal, L_key192_top); 2804 __ cmpl(rax, 60); 2805 __ jcc(Assembler::equal, L_key256_top); 2806 2807 //key128 begins here 2808 __ movptr(pos, 0); // init pos before L_multiBlock_loopTop 2809 2810 #define CTR_DoFour(opc, src_reg) \ 2811 __ opc(xmm_result0, src_reg); \ 2812 __ opc(xmm_result1, src_reg); \ 2813 __ opc(xmm_result2, src_reg); \ 2814 __ opc(xmm_result3, src_reg); 2815 2816 // k == 0 : generate code for key_128 2817 // k == 1 : generate code for key_192 2818 // k == 2 :
generate code for key_256 2819 for (int k = 0; k < 3; ++k) { 2820 //multi blocks starts here 2821 __ align(OptoLoopAlignment); 2822 __ BIND(L_multiBlock_loopTop[k]); 2823 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left 2824 __ jcc(Assembler::less, L_singleBlockLoopTop[k]); 2825 2826 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2827 __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr())); 2828 2829 //load, then increase counters 2830 CTR_DoFour(movdqa, xmm_curr_counter); 2831 __ push(rbx); 2832 inc_counter(rbx, xmm_result1, 0x01, L_incCounter[k][0]); 2833 inc_counter(rbx, xmm_result2, 0x02, L_incCounter[k][1]); 2834 inc_counter(rbx, xmm_result3, 0x03, L_incCounter[k][2]); 2835 inc_counter(rbx, xmm_curr_counter, 0x04, L_incCounter[k][3]); 2836 __ pop (rbx); 2837 2838 load_key(xmm_key, key, 0x00, xmm_key_shuf_mask); // load Round 0 key. interleaving for better performance 2839 2840 CTR_DoFour(pshufb, xmm_counter_shuf_mask); // after increased, shuffled counters back for PXOR 2841 CTR_DoFour(pxor, xmm_key); //PXOR with Round 0 key 2842 2843 for (int i = 1; i < rounds[k]; ++i) { 2844 load_key(xmm_key, key, (0x10 * i), xmm_key_shuf_mask); 2845 CTR_DoFour(aesenc, xmm_key); 2846 } 2847 load_key(xmm_key, key, (0x10 * rounds[k]), xmm_key_shuf_mask); 2848 CTR_DoFour(aesenclast, xmm_key); 2849 2850 // get next PARALLEL_FACTOR blocks into xmm_from registers 2851 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 2852 __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 2853 __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 2854 2855 // PXOR with input text 2856 __ pxor(xmm_result0, xmm_from0); //result0 is xmm4 2857 __ pxor(xmm_result1, xmm_from1); 2858 __ pxor(xmm_result2, xmm_from2); 2859 2860 // store PARALLEL_FACTOR results into the next 64 bytes of output 2861 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 2862 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 2863 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 2864 2865 // do it here after xmm_result0 is saved, because xmm_from3 reuses the same register as xmm_result0.
2866 __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); 2867 __ pxor(xmm_result3, xmm_from3); 2868 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 2869 2870 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); // increase the length of crypt text 2871 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length 2872 __ jmp(L_multiBlock_loopTop[k]); 2873 2874 // singleBlock starts here 2875 __ align(OptoLoopAlignment); 2876 __ BIND(L_singleBlockLoopTop[k]); 2877 __ cmpptr(len_reg, 0); 2878 __ jcc(Assembler::equal, L_exit); 2879 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2880 __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr())); 2881 __ movdqa(xmm_result0, xmm_curr_counter); 2882 load_key(xmm_key, key, 0x00, xmm_key_shuf_mask); 2883 __ push(rbx);//rbx is used for increasing counter 2884 inc_counter(rbx, xmm_curr_counter, 0x01, L_incCounter_single[k]); 2885 __ pop (rbx); 2886 __ pshufb(xmm_result0, xmm_counter_shuf_mask); 2887 __ pxor(xmm_result0, xmm_key); 2888 for (int i = 1; i < rounds[k]; i++) { 2889 load_key(xmm_key, key, (0x10 * i), xmm_key_shuf_mask); 2890 __ aesenc(xmm_result0, xmm_key); 2891 } 2892 load_key(xmm_key, key, (0x10 * rounds[k]), xmm_key_shuf_mask); 2893 __ aesenclast(xmm_result0, xmm_key); 2894 __ cmpptr(len_reg, AESBlockSize); 2895 __ jcc(Assembler::less, L_processTail_insr[k]); 2896 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 2897 __ pxor(xmm_result0, xmm_from0); 2898 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 2899 __ addptr(pos, AESBlockSize); 2900 __ subptr(len_reg, AESBlockSize); 2901 __ jmp(L_singleBlockLoopTop[k]); 2902 2903 __ BIND(L_processTail_insr[k]); // Process the tail part of the input array 2904 __ addptr(pos, len_reg); // 1. Insert bytes from src array into xmm_from0 register 2905 __ testptr(len_reg, 8); 2906 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 2907 __ subptr(pos,8); 2908 __ pinsrd(xmm_from0, Address(from, pos), 0); 2909 __ pinsrd(xmm_from0, Address(from, pos, Address::times_1, 4), 1); 2910 __ BIND(L_processTail_4_insr[k]); 2911 __ testptr(len_reg, 4); 2912 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 2913 __ subptr(pos,4); 2914 __ pslldq(xmm_from0, 4); 2915 __ pinsrd(xmm_from0, Address(from, pos), 0); 2916 __ BIND(L_processTail_2_insr[k]); 2917 __ testptr(len_reg, 2); 2918 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 2919 __ subptr(pos, 2); 2920 __ pslldq(xmm_from0, 2); 2921 __ pinsrw(xmm_from0, Address(from, pos), 0); 2922 __ BIND(L_processTail_1_insr[k]); 2923 __ testptr(len_reg, 1); 2924 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 2925 __ subptr(pos, 1); 2926 __ pslldq(xmm_from0, 1); 2927 __ pinsrb(xmm_from0, Address(from, pos), 0); 2928 __ BIND(L_processTail_exit_insr[k]); 2929 2930 __ movptr(saved_encCounter_start, saved_counter_param); 2931 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 2932 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 2933 2934 __ testptr(len_reg, 8); 2935 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. 
array 2936 __ pextrd(Address(to, pos), xmm_result0, 0); 2937 __ pextrd(Address(to, pos, Address::times_1, 4), xmm_result0, 1); 2938 __ psrldq(xmm_result0, 8); 2939 __ addptr(pos, 8); 2940 __ BIND(L_processTail_4_extr[k]); 2941 __ testptr(len_reg, 4); 2942 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 2943 __ pextrd(Address(to, pos), xmm_result0, 0); 2944 __ psrldq(xmm_result0, 4); 2945 __ addptr(pos, 4); 2946 __ BIND(L_processTail_2_extr[k]); 2947 __ testptr(len_reg, 2); 2948 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 2949 __ pextrb(Address(to, pos), xmm_result0, 0); 2950 __ pextrb(Address(to, pos, Address::times_1, 1), xmm_result0, 1); 2951 __ psrldq(xmm_result0, 2); 2952 __ addptr(pos, 2); 2953 __ BIND(L_processTail_1_extr[k]); 2954 __ testptr(len_reg, 1); 2955 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 2956 __ pextrb(Address(to, pos), xmm_result0, 0); 2957 2958 __ BIND(L_processTail_exit_extr[k]); 2959 __ movptr(used_addr, used_addr_param); 2960 __ movl(Address(used_addr, 0), len_reg); 2961 __ jmp(L_exit); 2962 } 2963 2964 __ BIND(L_exit); 2965 __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr())); 2966 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 2967 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 2968 handleSOERegisters(false /*restoring*/); 2969 __ movptr(rax, len_param); // return length 2970 __ leave(); // required for proper stackwalking of RuntimeStub frame 2971 __ ret(0); 2972 2973 __ BIND (L_key192_top); 2974 __ movptr(pos, 0); // init pos before L_multiBlock_loopTop 2975 __ jmp(L_multiBlock_loopTop[1]); //key192 2976 2977 __ BIND (L_key256_top); 2978 __ movptr(pos, 0); // init pos before L_multiBlock_loopTop 2979 __ jmp(L_multiBlock_loopTop[2]); //key256 2980 2981 return start; 2982 } 2983 2984 // ofs and limit are used for multi-block byte array. 2985 // int com.sun.security.provider.MD5.implCompress(byte[] b, int ofs)
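// Hedged sketch of the contract behind the two comment lines above: the
// multi-block variant folds successive 64-byte chunks of b into the MD5
// state while ofs <= limit and hands the updated offset back in rax; the
// single-block variant compresses exactly one chunk ('md5_block' is a
// hypothetical one-chunk primitive):
//
//   int impl_compress_sketch(jbyte* b, jint* state, int ofs, int limit,
//                            bool multi_block) {
//     do { md5_block(state, b + ofs); ofs += 64; }
//     while (multi_block && ofs <= limit);
//     return ofs;
//   }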
2986 address generate_md5_implCompress(bool multi_block, const char *name) { 2987 __ align(CodeEntryAlignment); 2988 StubCodeMark mark(this, "StubRoutines", name); 2989 address start = __ pc(); 2990 2991 const Register buf_param = rbp; 2992 const Address state_param(rsp, 0 * wordSize); 2993 const Address ofs_param (rsp, 1 * wordSize); 2994 const Address limit_param(rsp, 2 * wordSize); 2995 2996 __ enter(); 2997 __ push(rbx); 2998 __ push(rdi); 2999 __ push(rsi); 3000 __ push(rbp); 3001 __ subptr(rsp, 3 * wordSize); 3002 3003 __ movptr(rsi, Address(rbp, 8 + 4)); 3004 __ movptr(state_param, rsi); 3005 if (multi_block) { 3006 __ movptr(rsi, Address(rbp, 8 + 8)); 3007 __ movptr(ofs_param, rsi); 3008 __ movptr(rsi, Address(rbp, 8 + 12)); 3009 __ movptr(limit_param, rsi); 3010 } 3011 __ movptr(buf_param, Address(rbp, 8 + 0)); // do it last because it overwrites rbp 3012 __ fast_md5(buf_param, state_param, ofs_param, limit_param, multi_block); 3013 3014 __ addptr(rsp, 3 * wordSize); 3015 __ pop(rbp); 3016 __ pop(rsi); 3017 __ pop(rdi); 3018 __ pop(rbx); 3019 __ leave(); 3020 __ ret(0); 3021 return start; 3022 } 3023 3024 address generate_upper_word_mask() { 3025 __ align64(); 3026 StubCodeMark mark(this, "StubRoutines", "upper_word_mask"); 3027 address start = __ pc(); 3028 __ emit_data(0x00000000, relocInfo::none, 0); 3029 __ emit_data(0x00000000, relocInfo::none, 0); 3030 __ emit_data(0x00000000, relocInfo::none, 0); 3031 __ emit_data(0xFFFFFFFF, relocInfo::none, 0); 3032 return start; 3033 } 3034 3035 address generate_shuffle_byte_flip_mask() { 3036 __ align64(); 3037 StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask"); 3038 address start = __ pc(); 3039 __ emit_data(0x0c0d0e0f, relocInfo::none, 0); 3040 __ emit_data(0x08090a0b, relocInfo::none, 0); 3041 __ emit_data(0x04050607, relocInfo::none, 0); 3042 __ emit_data(0x00010203, relocInfo::none, 0); 3043 return start; 3044 } 3045 3046 // ofs and limit are used for multi-block byte array. 3047 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
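// Editor's note (hedged): the shuffle_byte_flip_mask emitted above lists
// byte indices 0x0f down to 0x00, so pshufb reverses all sixteen bytes of a
// lane; this is how fast_sha1() below obtains big-endian message words. Per
// 32-bit word the effect is the classic byte swap:
//
//   juint bswap32(juint x) {
//     return (x >> 24) | ((x >> 8) & 0x0000ff00U)
//          | ((x << 8) & 0x00ff0000U) | (x << 24);
//   }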
3048 address generate_sha1_implCompress(bool multi_block, const char *name) { 3049 __ align(CodeEntryAlignment); 3050 StubCodeMark mark(this, "StubRoutines", name); 3051 address start = __ pc(); 3052 3053 Register buf = rax; 3054 Register state = rdx; 3055 Register ofs = rcx; 3056 Register limit = rdi; 3057 3058 const Address buf_param(rbp, 8 + 0); 3059 const Address state_param(rbp, 8 + 4); 3060 const Address ofs_param(rbp, 8 + 8); 3061 const Address limit_param(rbp, 8 + 12); 3062 3063 const XMMRegister abcd = xmm0; 3064 const XMMRegister e0 = xmm1; 3065 const XMMRegister e1 = xmm2; 3066 const XMMRegister msg0 = xmm3; 3067 3068 const XMMRegister msg1 = xmm4; 3069 const XMMRegister msg2 = xmm5; 3070 const XMMRegister msg3 = xmm6; 3071 const XMMRegister shuf_mask = xmm7; 3072 3073 __ enter(); 3074 __ subptr(rsp, 8 * wordSize); 3075 handleSOERegisters(true /*saving*/); 3076 3077 __ movptr(buf, buf_param); 3078 __ movptr(state, state_param); 3079 if (multi_block) { 3080 __ movptr(ofs, ofs_param); 3081 __ movptr(limit, limit_param); 3082 } 3083 3084 __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask, 3085 buf, state, ofs, limit, rsp, multi_block); 3086 3087 handleSOERegisters(false /*restoring*/); 3088 __ addptr(rsp, 8 * wordSize); 3089 __ leave(); 3090 __ ret(0); 3091 return start; 3092 } 3093 3094 address generate_pshuffle_byte_flip_mask() { 3095 __ align64(); 3096 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask"); 3097 address start = __ pc(); 3098 __ emit_data(0x00010203, relocInfo::none, 0); 3099 __ emit_data(0x04050607, relocInfo::none, 0); 3100 __ emit_data(0x08090a0b, relocInfo::none, 0); 3101 __ emit_data(0x0c0d0e0f, relocInfo::none, 0); 3102 return start; 3103 } 3104 3105 // ofs and limit are used for multi-block byte array.
  // ofs and limit are used for multi-block byte arrays.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha256_implCompress(bool multi_block, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = rbx;
    Register state = rsi;
    Register ofs   = rdx;
    Register limit = rcx;

    const Address buf_param(rbp, 8 + 0);
    const Address state_param(rbp, 8 + 4);
    const Address ofs_param(rbp, 8 + 8);
    const Address limit_param(rbp, 8 + 12);

    const XMMRegister msg = xmm0;
    const XMMRegister state0 = xmm1;
    const XMMRegister state1 = xmm2;
    const XMMRegister msgtmp0 = xmm3;

    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    __ enter();
    __ subptr(rsp, 8 * wordSize);
    handleSOERegisters(true /*saving*/);
    __ movptr(buf, buf_param);
    __ movptr(state, state_param);
    if (multi_block) {
      __ movptr(ofs, ofs_param);
      __ movptr(limit, limit_param);
    }

    __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                   buf, state, ofs, limit, rsp, multi_block);

    handleSOERegisters(false);
    __ addptr(rsp, 8 * wordSize);
    __ leave();
    __ ret(0);
    return start;
  }

  // byte swap x86 long
  address generate_ghash_long_swap_mask() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask");
    address start = __ pc();
    __ emit_data(0x0b0a0908, relocInfo::none, 0);
    __ emit_data(0x0f0e0d0c, relocInfo::none, 0);
    __ emit_data(0x03020100, relocInfo::none, 0);
    __ emit_data(0x07060504, relocInfo::none, 0);

    return start;
  }

  // byte swap x86 byte array
  address generate_ghash_byte_swap_mask() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask");
    address start = __ pc();
    __ emit_data(0x0c0d0e0f, relocInfo::none, 0);
    __ emit_data(0x08090a0b, relocInfo::none, 0);
    __ emit_data(0x04050607, relocInfo::none, 0);
    __ emit_data(0x00010203, relocInfo::none, 0);
    return start;
  }

  /* Single and multi-block ghash operations */
  address generate_ghash_processBlocks() {
    assert(UseGHASHIntrinsics, "need GHASH intrinsics and CLMUL support");
    __ align(CodeEntryAlignment);
    Label L_ghash_loop, L_exit;
    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
    address start = __ pc();

    const Register state   = rdi;
    const Register subkeyH = rsi;
    const Register data    = rdx;
    const Register blocks  = rcx;

    const Address state_param(rbp, 8 + 0);
    const Address subkeyH_param(rbp, 8 + 4);
    const Address data_param(rbp, 8 + 8);
    const Address blocks_param(rbp, 8 + 12);

    const XMMRegister xmm_temp0 = xmm0;
    const XMMRegister xmm_temp1 = xmm1;
    const XMMRegister xmm_temp2 = xmm2;
    const XMMRegister xmm_temp3 = xmm3;
    const XMMRegister xmm_temp4 = xmm4;
    const XMMRegister xmm_temp5 = xmm5;
    const XMMRegister xmm_temp6 = xmm6;
    const XMMRegister xmm_temp7 = xmm7;

    __ enter();
    handleSOERegisters(true);  // Save registers

    __ movptr(state, state_param);
    __ movptr(subkeyH, subkeyH_param);
    __ movptr(data, data_param);
    __ movptr(blocks, blocks_param);
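
    // Background (explanatory note): GHASH folds each 16-byte block into
    // the running state as
    //   state = (state ^ block) * H   in GF(2^128),
    // where the multiplication is carry-less and the field is defined by
    // the polynomial x^128 + x^7 + x^2 + x + 1. PCLMULQDQ multiplies one
    // 64-bit half of each operand (imm8 bit 0 selects the half of the
    // destination operand, bit 4 the half of the source), so the full
    // 128x128-bit product below is assembled from four half-products.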

    __ movdqu(xmm_temp0, Address(state, 0));
    __ pshufb(xmm_temp0, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));

    __ movdqu(xmm_temp1, Address(subkeyH, 0));
    __ pshufb(xmm_temp1, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));

    __ BIND(L_ghash_loop);
    __ movdqu(xmm_temp2, Address(data, 0));
    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));

    __ pxor(xmm_temp0, xmm_temp2);

    //
    // Multiply with the hash key
    //
    __ movdqu(xmm_temp3, xmm_temp0);
    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);      // xmm3 holds a0*b0
    __ movdqu(xmm_temp4, xmm_temp0);
    __ pclmulqdq(xmm_temp4, xmm_temp1, 16);     // xmm4 holds a0*b1

    __ movdqu(xmm_temp5, xmm_temp0);
    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);      // xmm5 holds a1*b0
    __ movdqu(xmm_temp6, xmm_temp0);
    __ pclmulqdq(xmm_temp6, xmm_temp1, 17);     // xmm6 holds a1*b1

    __ pxor(xmm_temp4, xmm_temp5);              // xmm4 holds a0*b1 + a1*b0

    __ movdqu(xmm_temp5, xmm_temp4);            // move the contents of xmm4 to xmm5
    __ psrldq(xmm_temp4, 8);                    // shift xmm4 64 bits to the right
    __ pslldq(xmm_temp5, 8);                    // shift xmm5 64 bits to the left
    __ pxor(xmm_temp3, xmm_temp5);
    __ pxor(xmm_temp6, xmm_temp4);              // Register pair <xmm6:xmm3> holds the result
                                                // of the carry-less multiplication of
                                                // xmm0 by xmm1.

    // We shift the result of the multiplication by one bit position
    // to the left to compensate for the fact that the bits are reversed.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp4, xmm_temp6);
    __ pslld(xmm_temp3, 1);
    __ pslld(xmm_temp6, 1);
    __ psrld(xmm_temp7, 31);
    __ psrld(xmm_temp4, 31);
    __ movdqu(xmm_temp5, xmm_temp7);
    __ pslldq(xmm_temp4, 4);
    __ pslldq(xmm_temp7, 4);
    __ psrldq(xmm_temp5, 12);
    __ por(xmm_temp3, xmm_temp7);
    __ por(xmm_temp6, xmm_temp4);
    __ por(xmm_temp6, xmm_temp5);

    //
    // First phase of the reduction
    //
    // Move xmm3 into xmm4, xmm5, xmm7 in order to perform the shifts
    // independently.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp4, xmm_temp3);
    __ movdqu(xmm_temp5, xmm_temp3);
    __ pslld(xmm_temp7, 31);                    // packed left shift, << 31
    __ pslld(xmm_temp4, 30);                    // packed left shift, << 30
    __ pslld(xmm_temp5, 25);                    // packed left shift, << 25
    __ pxor(xmm_temp7, xmm_temp4);              // xor the shifted versions
    __ pxor(xmm_temp7, xmm_temp5);
    __ movdqu(xmm_temp4, xmm_temp7);
    __ pslldq(xmm_temp7, 12);
    __ psrldq(xmm_temp4, 4);
    __ pxor(xmm_temp3, xmm_temp7);              // first phase of the reduction complete
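
    // Note on the reduction (explanatory): the 256-bit carry-less product
    // in <xmm6:xmm3> must be reduced modulo x^128 + x^7 + x^2 + x + 1.
    // Because GCM defines its field elements in reflected (bit-reversed)
    // order, the product was first shifted left by one bit above; the
    // two-phase shift-and-xor sequence (left shifts by 31/30/25, then
    // right shifts by 1/2/7) follows the software reduction described in
    // Intel's white paper on carry-less multiplication and GCM.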

    //
    // Second phase of the reduction
    //
    // Make 3 copies of xmm3 in xmm2, xmm5, xmm7 for doing these
    // shift operations.
    __ movdqu(xmm_temp2, xmm_temp3);
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp5, xmm_temp3);
    __ psrld(xmm_temp2, 1);                     // packed right shift, >> 1
    __ psrld(xmm_temp7, 2);                     // packed right shift, >> 2
    __ psrld(xmm_temp5, 7);                     // packed right shift, >> 7
    __ pxor(xmm_temp2, xmm_temp7);              // xor the shifted versions
    __ pxor(xmm_temp2, xmm_temp5);
    __ pxor(xmm_temp2, xmm_temp4);
    __ pxor(xmm_temp3, xmm_temp2);
    __ pxor(xmm_temp6, xmm_temp3);              // the result is in xmm6

    __ decrement(blocks);
    __ jcc(Assembler::zero, L_exit);
    __ movdqu(xmm_temp0, xmm_temp6);
    __ addptr(data, 16);
    __ jmp(L_ghash_loop);

    __ BIND(L_exit);
    // Byte swap 16-byte result
    __ pshufb(xmm_temp6, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));
    __ movdqu(Address(state, 0), xmm_temp6);    // store the result

    handleSOERegisters(false);                  // restore registers
    __ leave();
    __ ret(0);
    return start;
  }

  /**
   * Arguments:
   *
   * Inputs:
   *   rsp(4)  - int crc
   *   rsp(8)  - byte* buf
   *   rsp(12) - int length
   *
   * Output:
   *   rax     - int crc result
   */
  address generate_updateBytesCRC32() {
    assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");

    address start = __ pc();

    const Register crc   = rdx;  // crc
    const Register buf   = rsi;  // source java byte array address
    const Register len   = rcx;  // length
    const Register table = rdi;  // crc_table address (reused register)
    const Register tmp   = rbx;
    assert_different_registers(crc, buf, len, table, tmp, rax);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    __ push(rbx);

    Address crc_arg(rbp, 8 + 0);
    Address buf_arg(rbp, 8 + 4);
    Address len_arg(rbp, 8 + 8);

    // Load up:
    __ movl(crc, crc_arg);
    __ movptr(buf, buf_arg);
    __ movl(len, len_arg);

    __ kernel_crc32(crc, buf, len, table, tmp);

    __ movl(rax, crc);
    __ pop(rbx);
    __ pop(rdi);
    __ pop(rsi);
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
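
  // For orientation (an illustrative sketch, not used by the stub): the
  // folded kernel above must agree with the bit-at-a-time definition of
  // CRC-32 over the reflected polynomial 0xEDB88320 used by
  // java.util.zip.CRC32:
  //
  //   uint32_t crc32_bitwise(uint32_t crc, const uint8_t* p, size_t n) {
  //     while (n--) {
  //       crc ^= *p++;
  //       for (int k = 0; k < 8; k++)
  //         crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
  //     }
  //     return crc;
  //   }
  //
  // (The pre- and post-inversion of the crc value happens outside this
  // loop; where exactly is a detail of kernel_crc32 and its callers.)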

  /**
   * Arguments:
   *
   * Inputs:
   *   rsp(4)  - int crc
   *   rsp(8)  - byte* buf
   *   rsp(12) - int length
   *   rsp(16) - table_start - optional (present only when doing a library_call,
   *             not used by the x86 algorithm)
   *
   * Output:
   *   rax     - int crc result
   */
  address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
    assert(UseCRC32CIntrinsics, "need SSE4_2");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
    address start = __ pc();
    const Register crc = rax;  // crc
    const Register buf = rcx;  // source java byte array address
    const Register len = rdx;  // length
    const Register d = rbx;
    const Register g = rsi;
    const Register h = rdi;
    const Register empty = 0;  // will never be used; kept so that the
                               // crc32c_IPL_Alg2_Alt2 signature stays the
                               // same between the 64- and 32-bit versions
    assert_different_registers(crc, buf, len, d, g, h);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
    Address crc_arg(rsp, 4 + 4 + 0);  // ESP + 4 (return address), plus
                                      // another 4 because __ enter has
                                      // just pushed ebp onto the stack
    Address buf_arg(rsp, 4 + 4 + 4);
    Address len_arg(rsp, 4 + 4 + 8);
    // Load up:
    __ movl(crc, crc_arg);
    __ movl(buf, buf_arg);
    __ movl(len, len_arg);
    __ push(d);
    __ push(g);
    __ push(h);
    __ crc32c_ipl_alg2_alt2(crc, buf, len,
                            d, g, h,
                            empty, empty, empty,
                            xmm0, xmm1, xmm2,
                            is_pclmulqdq_supported);
    __ pop(h);
    __ pop(g);
    __ pop(d);
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
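
  // Explanatory note: unlike updateBytesCRC32 above, this stub computes
  // CRC-32C over the Castagnoli polynomial 0x1EDC6F41, for which SSE4.2
  // provides a dedicated crc32 instruction. Judging by its name,
  // crc32c_ipl_alg2_alt2 implements "algorithm 2, alternative 2" from
  // Intel's CRC32C material: the buffer is processed as independent
  // streams that are recombined with carry-less multiplication when
  // is_pclmulqdq_supported is true.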

  address generate_libmExp() {
    StubCodeMark mark(this, "StubRoutines", "libmExp");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmLog() {
    StubCodeMark mark(this, "StubRoutines", "libmLog");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmLog10() {
    StubCodeMark mark(this, "StubRoutines", "libmLog10");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmPow() {
    StubCodeMark mark(this, "StubRoutines", "libmPow");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libm_reduce_pi04l() {
    StubCodeMark mark(this, "StubRoutines", "libm_reduce_pi04l");

    address start = __ pc();

    BLOCK_COMMENT("Entry:");
    __ libm_reduce_pi04l(rax, rcx, rdx, rbx, rsi, rdi, rbp, rsp);

    return start;
  }

  address generate_libm_sin_cos_huge() {
    StubCodeMark mark(this, "StubRoutines", "libm_sin_cos_huge");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;

    BLOCK_COMMENT("Entry:");
    __ libm_sincos_huge(x0, x1, rax, rcx, rdx, rbx, rsi, rdi, rbp, rsp);

    return start;
  }

  address generate_libmSin() {
    StubCodeMark mark(this, "StubRoutines", "libmSin");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rdx);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmCos() {
    StubCodeMark mark(this, "StubRoutines", "libmCos");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libm_tan_cot_huge() {
    StubCodeMark mark(this, "StubRoutines", "libm_tan_cot_huge");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;

    BLOCK_COMMENT("Entry:");
    __ libm_tancot_huge(x0, x1, rax, rcx, rdx, rbx, rsi, rdi, rbp, rsp);

    return start;
  }

  address generate_libmTan() {
    StubCodeMark mark(this, "StubRoutines", "libmTan");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
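
  // Explanatory note on the stubs above: the libm* wrappers back the
  // dexp, dlog, dlog10, dpow, dsin, dcos and dtan math intrinsics. The
  // reduce_pi04l and *_huge routines are not intrinsic entry points in
  // their own right; they are support paths reached from the sin/cos/tan
  // kernels when the argument is too large for the polynomial
  // approximation, and the MacroAssembler routines emit their complete
  // bodies, evidently including the final return, which is why no
  // explicit enter()/leave()/ret() appears in those generators.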
"nmethod_entry_barrier"); 3659 3660 Label deoptimize_label; 3661 3662 address start = __ pc(); 3663 3664 __ push(-1); // cookie, this is used for writing the new rsp when deoptimizing 3665 3666 BLOCK_COMMENT("Entry:"); 3667 __ enter(); // save rbp 3668 3669 // save rbx, because we want to use that value. 3670 // We could do without it but then we depend on the number of slots used by pusha 3671 __ push(rbx); 3672 3673 __ lea(rbx, Address(rsp, wordSize * 3)); // 1 for cookie, 1 for rbp, 1 for rbx - this should be the return address 3674 3675 __ pusha(); 3676 3677 // xmm0 and xmm1 may be used for passing float/double arguments 3678 3679 if (UseSSE >= 2) { 3680 const int xmm_size = wordSize * 4; 3681 __ subptr(rsp, xmm_size * 2); 3682 __ movdbl(Address(rsp, xmm_size * 1), xmm1); 3683 __ movdbl(Address(rsp, xmm_size * 0), xmm0); 3684 } else if (UseSSE >= 1) { 3685 const int xmm_size = wordSize * 2; 3686 __ subptr(rsp, xmm_size * 2); 3687 __ movflt(Address(rsp, xmm_size * 1), xmm1); 3688 __ movflt(Address(rsp, xmm_size * 0), xmm0); 3689 } 3690 3691 __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(address*)>(BarrierSetNMethod::nmethod_stub_entry_barrier)), rbx); 3692 3693 if (UseSSE >= 2) { 3694 const int xmm_size = wordSize * 4; 3695 __ movdbl(xmm0, Address(rsp, xmm_size * 0)); 3696 __ movdbl(xmm1, Address(rsp, xmm_size * 1)); 3697 __ addptr(rsp, xmm_size * 2); 3698 } else if (UseSSE >= 1) { 3699 const int xmm_size = wordSize * 2; 3700 __ movflt(xmm0, Address(rsp, xmm_size * 0)); 3701 __ movflt(xmm1, Address(rsp, xmm_size * 1)); 3702 __ addptr(rsp, xmm_size * 2); 3703 } 3704 3705 __ cmpl(rax, 1); // 1 means deoptimize 3706 __ jcc(Assembler::equal, deoptimize_label); 3707 3708 __ popa(); 3709 __ pop(rbx); 3710 3711 __ leave(); 3712 3713 __ addptr(rsp, 1 * wordSize); // cookie 3714 __ ret(0); 3715 3716 __ BIND(deoptimize_label); 3717 3718 __ popa(); 3719 __ pop(rbx); 3720 3721 __ leave(); 3722 3723 // this can be taken out, but is good for verification purposes. getting a SIGSEGV 3724 // here while still having a correct stack is valuable 3725 __ testptr(rsp, Address(rsp, 0)); 3726 3727 __ movptr(rsp, Address(rsp, 0)); // new rsp was written in the barrier 3728 __ jmp(Address(rsp, -1 * wordSize)); // jmp target should be callers verified_entry_point 3729 3730 return start; 3731 } 3732 3733 public: 3734 // Information about frame layout at time of blocking runtime call. 3735 // Note that we only have to preserve callee-saved registers since 3736 // the compilers are responsible for supplying a continuation point 3737 // if they expect all registers to be preserved. 3738 enum layout { 3739 thread_off, // last_java_sp 3740 arg1_off, 3741 arg2_off, 3742 rbp_off, // callee saved register 3743 ret_pc, 3744 framesize 3745 }; 3746 3747 private: 3748 3749 #undef __ 3750 #define __ masm-> 3751 3752 //------------------------------------------------------------------------------------------------------------------------ 3753 // Continuation point for throwing of implicit exceptions that are not handled in 3754 // the current activation. Fabricates an exception oop and initiates normal 3755 // exception dispatching in this frame. 3756 // 3757 // Previously the compiler (c2) allowed for callee save registers on Java calls. 3758 // This is no longer true after adapter frames were removed but could possibly 3759 // be brought back in the future if the interpreter code was reworked and it 3760 // was deemed worthwhile. 

 public:
  // Information about frame layout at time of blocking runtime call.
  // Note that we only have to preserve callee-saved registers since
  // the compilers are responsible for supplying a continuation point
  // if they expect all registers to be preserved.
  enum layout {
    thread_off,    // last_java_sp
    arg1_off,
    arg2_off,
    rbp_off,       // callee saved register
    ret_pc,
    framesize
  };

 private:

#undef __
#define __ masm->

  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for throwing of implicit exceptions that are not handled in
  // the current activation. Fabricates an exception oop and initiates normal
  // exception dispatching in this frame.
  //
  // Previously the compiler (c2) allowed for callee save registers on Java calls.
  // This is no longer true after adapter frames were removed but could possibly
  // be brought back in the future if the interpreter code was reworked and it
  // was deemed worthwhile. The comment below was left to describe what must
  // happen here if callee saves were resurrected. As it stands now this stub
  // could actually be a vanilla BufferBlob and have no oopMap at all.
  // Since it doesn't make much difference we've chosen to leave it the
  // way it was in the callee save days and keep the comment.

  // If we need to preserve callee-saved values we need a callee-saved oop map and
  // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs.
  // If the compiler needs all registers to be preserved between the fault
  // point and the exception handler then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other implicit
  // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
  // either at call sites or otherwise assume that stack unwinding will be initiated,
  // so caller-saved registers were assumed volatile in the compiler.
  address generate_throw_exception(const char* name, address runtime_entry,
                                   Register arg1 = noreg, Register arg2 = noreg) {

    int insts_size = 256;
    int locs_size  = 32;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM
    Register java_thread = rbx;
    __ get_thread(java_thread);

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // pc and rbp, already pushed
    __ subptr(rsp, (framesize-2) * wordSize); // prolog

    // Frame is now completed as far as size and linkage.

    int frame_complete = __ pc() - start;

    // push java thread (becomes first argument of C function)
    __ movptr(Address(rsp, thread_off * wordSize), java_thread);
    if (arg1 != noreg) {
      __ movptr(Address(rsp, arg1_off * wordSize), arg1);
    }
    if (arg2 != noreg) {
      assert(arg1 != noreg, "missing reg arg");
      __ movptr(Address(rsp, arg2_off * wordSize), arg2);
    }

    // Set up last_Java_sp and last_Java_fp
    __ set_last_Java_frame(java_thread, rsp, rbp, NULL);

    // Call runtime
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));
    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);
    oop_maps->add_gc_map(__ pc() - start, map);
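
    // Explanatory note: the oop map is registered at the return address
    // of the call above so that a stack walk occurring during the runtime
    // call can interpret this frame; it is empty (no oop-holding
    // locations) because, as the comment before this stub explains, no
    // oops are kept live across the call.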

    // restore the thread (cannot use the pushed argument since arguments
    // may be overwritten by C code generated by an optimizing compiler);
    // however, we can use the register value directly if it is callee saved.
    __ get_thread(java_thread);

    __ reset_last_Java_frame(java_thread, true);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif /* ASSERT */
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, framesize, oop_maps, false);
    return stub->entry_point();
  }


  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::x86::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::x86::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::x86::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, exceptions masked (MXCSR has no precision control)
    StubRoutines::x86::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values;
    // their layout is critical for correct loading by the FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::x86::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::x86::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::x86::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-Bias for strict fp multiply/divide
    StubRoutines::x86::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::x86::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::x86::_fpu_subnormal_bias2[2] = 0x7bff;
  }
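
  // Explanatory note on the encodings above: in the x87 control word,
  // bits 0-5 are the exception masks (0x3F = all masked), bits 8-9 select
  // the precision and bits 10-11 the rounding mode; 0x027F thus decodes
  // as all exceptions masked, 53-bit precision, round to nearest. In
  // MXCSR, bits 7-12 are the exception masks and bits 13-14 the rounding
  // control, so 0x1F80 decodes as all exceptions masked, round to nearest.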

  //---------------------------------------------------------------------------
  // Initialization

  void generate_initial() {
    // Generates all stubs and initializes the entry points

    //------------------------------------------------------------------------------------------------------------------------
    // entry points that exist in all platforms
    // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
    // the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);
    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // platform dependent
    create_control_words();

    StubRoutines::x86::_verify_mxcsr_entry         = generate_verify_mxcsr();
    StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd();
    StubRoutines::x86::_d2i_wrapper                = generate_d2i_wrapper(T_INT, CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
    StubRoutines::x86::_d2l_wrapper                = generate_d2i_wrapper(T_LONG, CAST_FROM_FN_PTR(address, SharedRuntime::d2l));

    // Build these early so they are available for the interpreter
    StubRoutines::_throw_StackOverflowError_entry         = generate_throw_exception("StackOverflowError throw_exception",
                                                                                     CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry = generate_throw_exception("delayed StackOverflowError throw_exception",
                                                                                     CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));

    if (UseCRC32Intrinsics) {
      // set table address before generating stubs that use it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_L_2il0floatpacket_0_adr = (address)StubRoutines::x86::_L_2il0floatpacket_0;
        StubRoutines::x86::_Pi4Inv_adr = (address)StubRoutines::x86::_Pi4Inv;
        StubRoutines::x86::_Pi4x3_adr  = (address)StubRoutines::x86::_Pi4x3;
        StubRoutines::x86::_Pi4x4_adr  = (address)StubRoutines::x86::_Pi4x4;
        StubRoutines::x86::_ones_adr   = (address)StubRoutines::x86::_ones;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dlibm_reduce_pi04l = generate_libm_reduce_pi04l();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dlibm_sin_cos_huge = generate_libm_sin_cos_huge();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dlibm_tan_cot_huge = generate_libm_tan_cot_huge();
        StubRoutines::_dtan = generate_libmTan();
      }
    }
  }
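
  // Explanatory note: stub generation happens in two phases, selected by
  // the constructor at the end of this class. generate_initial() above
  // runs early in VM startup so that routines such as the call stub and
  // the StackOverflowError throwers exist before the interpreter needs
  // them; generate_all() below runs later, because some of its stubs
  // (e.g. verify_oop, which must wait for universe_init) depend on a
  // more fully initialized VM.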

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in non-core builds
    // and need to be relocatable, so they each fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry          = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
    StubRoutines::_throw_IncompatibleClassChangeError_entry = generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
    StubRoutines::_throw_NullPointerException_at_call_entry = generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));

    //------------------------------------------------------------------------------------------------------------------------
    // entry points that are platform specific

    StubRoutines::x86::_vector_float_sign_mask    = generate_vector_mask("vector_float_sign_mask", 0x7FFFFFFF);
    StubRoutines::x86::_vector_float_sign_flip    = generate_vector_mask("vector_float_sign_flip", 0x80000000);
    StubRoutines::x86::_vector_double_sign_mask   = generate_vector_mask_long_double("vector_double_sign_mask", 0x7FFFFFFF, 0xFFFFFFFF);
    StubRoutines::x86::_vector_double_sign_flip   = generate_vector_mask_long_double("vector_double_sign_flip", 0x80000000, 0x00000000);
    StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask("vector_short_to_byte_mask", 0x00ff00ff);
    StubRoutines::x86::_vector_int_to_byte_mask   = generate_vector_mask("vector_int_to_byte_mask", 0x000000ff);
    StubRoutines::x86::_vector_int_to_short_mask  = generate_vector_mask("vector_int_to_short_mask", 0x0000ffff);
    StubRoutines::x86::_vector_32_bit_mask        = generate_vector_custom_i32("vector_32_bit_mask", Assembler::AVX_512bit,
                                                                               0xFFFFFFFF, 0, 0, 0);
    StubRoutines::x86::_vector_64_bit_mask        = generate_vector_custom_i32("vector_64_bit_mask", Assembler::AVX_512bit,
                                                                               0xFFFFFFFF, 0xFFFFFFFF, 0, 0);
    StubRoutines::x86::_vector_int_shuffle_mask   = generate_vector_mask("vector_int_shuffle_mask", 0x03020100);
    StubRoutines::x86::_vector_byte_shuffle_mask  = generate_vector_byte_shuffle_mask("vector_byte_shuffle_mask");
    StubRoutines::x86::_vector_short_shuffle_mask = generate_vector_mask("vector_short_shuffle_mask", 0x01000100);
    StubRoutines::x86::_vector_long_shuffle_mask  = generate_vector_mask_long_double("vector_long_shuffle_mask", 0x00000001, 0x0);
    StubRoutines::x86::_vector_byte_perm_mask     = generate_vector_byte_perm_mask("vector_byte_perm_mask");
    StubRoutines::x86::_vector_long_sign_mask     = generate_vector_mask_long_double("vector_long_sign_mask", 0x80000000, 0x00000000);
    StubRoutines::x86::_vector_all_bits_set       = generate_vector_mask("vector_all_bits_set", 0xFFFFFFFF);
    StubRoutines::x86::_vector_int_mask_cmp_bits  = generate_vector_mask("vector_int_mask_cmp_bits", 0x00000001);
    StubRoutines::x86::_vector_iota_indices       = generate_iota_indices("iota_indices");

    if (UsePopCountInstruction && VM_Version::supports_avx2() && !VM_Version::supports_avx512_vpopcntdq()) {
      // LUT implementation influenced by the counting-1s algorithm from section 5-1 of Hacker's Delight.
      StubRoutines::x86::_vector_popcount_lut = generate_popcount_avx_lut("popcount_lut");
    }

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // might be needed by the others

      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
    }

    if (UseAESCTRIntrinsics) {
      StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
      StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
    }

    if (UseMD5Intrinsics) {
      StubRoutines::_md5_implCompress   = generate_md5_implCompress(false, "md5_implCompress");
      StubRoutines::_md5_implCompressMB = generate_md5_implCompress(true,  "md5_implCompressMB");
    }
    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr        = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress   = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true,  "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
    }

    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (bs_nm != NULL) {
      StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier();
    }
  }


 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

#define UCM_TABLE_MAX_ENTRIES 8
void StubGenerator_generate(CodeBuffer* code, bool all) {
  if (UnsafeCopyMemory::_table == NULL) {
    UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
  }
  StubGenerator g(code, all);
}