/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ALL_GCS
#include "shenandoahBarrierSetAssembler_x86.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

const int MXCSR_MASK         = 0xFFC0;  // Mask out any pending exceptions
const int FPU_CNTRL_WRD_MASK = 0xFFFF;

// -------------------------------------------------------------------------------------------------------------------------
// Stub Code definitions

static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  address pc  = thread->saved_exception_pc();
  // pc is the instruction which we must emulate
  // doing a no-op is fine:  return garbage from the load
  // therefore, compute npc
  address npc = Assembler::locate_next_instruction(pc);

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif //PRODUCT

  void inc_copy_counter_np(BasicType t) {
#ifndef PRODUCT
    switch (t) {
    case T_BYTE:   inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);  return;
    case T_SHORT:  inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); return;
    case T_INT:    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);   return;
    case T_LONG:   inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);  return;
    case T_OBJECT: inc_counter_np(SharedRuntime::_oop_array_copy_ctr);    return;
    }
    ShouldNotReachHere();
#endif //PRODUCT
  }

  //------------------------------------------------------------------------------------------------------------------------
  // Call stubs are used to call Java from C
  //
  //    [ return_from_Java     ] <--- rsp
  //    [ argument word n      ]
  //      ...
  // -N [ argument word 1      ]
  // -7 [ Possible padding for stack alignment ]
  // -6 [ Possible padding for stack alignment ]
  // -5 [ Possible padding for stack alignment ]
  // -4 [ mxcsr save           ] <--- rsp_after_call
  // -3 [ saved rbx,           ]
  // -2 [ saved rsi            ]
  // -1 [ saved rdi            ]
  //  0 [ saved rbp,           ] <--- rbp,
  //  1 [ return address       ]
  //  2 [ ptr. to call wrapper ]
  //  3 [ result               ]
  //  4 [ result_type          ]
  //  5 [ method               ]
  //  6 [ entry_point          ]
  //  7 [ parameters           ]
  //  8 [ parameter_size       ]
  //  9 [ thread               ]

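  // Worked example of the layout above, with wordSize == 4 on x86_32: the
  // 'thread' argument in slot 9 lives at rbp + 36, while the mxcsr save
  // area in slot -4 lives at rbp - 16; that is why rsp_after_call below is
  // defined as Address(rbp, -4 * wordSize).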
  address generate_call_stub(address& return_address) {
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // stub code parameters / addresses
    assert(frame::entry_frame_call_wrapper_offset == 2, "adjust this code");
    bool  sse_save = false;
    const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_catch_exception()!
    const int     locals_count_in_bytes  (4*wordSize);
    const Address mxcsr_save    (rbp, -4 * wordSize);
    const Address saved_rbx     (rbp, -3 * wordSize);
    const Address saved_rsi     (rbp, -2 * wordSize);
    const Address saved_rdi     (rbp, -1 * wordSize);
    const Address result        (rbp,  3 * wordSize);
    const Address result_type   (rbp,  4 * wordSize);
    const Address method        (rbp,  5 * wordSize);
    const Address entry_point   (rbp,  6 * wordSize);
    const Address parameters    (rbp,  7 * wordSize);
    const Address parameter_size(rbp,  8 * wordSize);
    const Address thread        (rbp,  9 * wordSize); // same as in generate_catch_exception()!
    sse_save = UseSSE > 0;

    // stub code
    __ enter();
    __ movptr(rcx, parameter_size);                   // parameter counter
    __ shlptr(rcx, Interpreter::logStackElementSize); // convert parameter count to bytes
    __ addptr(rcx, locals_count_in_bytes);            // reserve space for register saves
    __ subptr(rsp, rcx);
    __ andptr(rsp, -(StackAlignmentInBytes));         // Align stack

    // save rdi, rsi, & rbx, according to C calling conventions
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ movptr(saved_rbx, rbx);
    // save and initialize %mxcsr
    if (sse_save) {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }

    // make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));

#ifdef ASSERT
    // make sure we have no pending exceptions
    { Label L;
      __ movptr(rcx, thread);
      __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(rcx, parameter_size);  // parameter counter
    __ testl(rcx, rcx);
    __ jcc(Assembler::zero, parameters_done);

    // parameter passing loop

    Label loop;
    // Copy Java parameters in reverse order (receiver last)
    // Note that the argument order is inverted in the process
    // source is rdx[rcx: N-1..0]
    // dest   is rsp[rbx: 0..N-1]

    __ movptr(rdx, parameters);          // parameter pointer
    __ xorptr(rbx, rbx);

    __ BIND(loop);

    // get parameter
    __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize));
    __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(),
                      Interpreter::expr_offset_in_bytes(0)), rax); // store parameter
    __ increment(rbx);
    __ decrement(rcx);
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);      // get Method*
    __ movptr(rax, entry_point); // get entry_point
    __ mov(rsi, rsp);            // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(rax);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

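    // Note: with UseSSE < 2 the 32-bit calling convention returns float and
    // double results on the x87 stack, so the cleanup below must leave
    // exactly one x87 slot occupied for those result types and an empty x87
    // stack for everything else.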
#ifdef COMPILER2
    {
      Label L_skip;
      if (UseSSE >= 2) {
        __ verify_FPU(0, "call_stub_return");
      } else {
        for (int i = 1; i < 8; i++) {
          __ ffree(i);
        }

        // UseSSE <= 1 so double result should be left on TOS
        __ movl(rsi, result_type);
        __ cmpl(rsi, T_DOUBLE);
        __ jcc(Assembler::equal, L_skip);
        if (UseSSE == 0) {
          // UseSSE == 0 so float result should be left on TOS
          __ cmpl(rsi, T_FLOAT);
          __ jcc(Assembler::equal, L_skip);
        }
        __ ffree(0);
      }
      __ BIND(L_skip);
    }
#endif // COMPILER2

    // store result depending on type
    // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(rdi, result);
    Label is_long, is_float, is_double, exit;
    __ movl(rsi, result_type);
    __ cmpl(rsi, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(rsi, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(rsi, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(rdi, 0), rax);
    __ BIND(exit);

    // check that FPU stack is empty
    __ verify_FPU(0, "generate_call_stub");

    // pop parameters
    __ lea(rsp, rsp_after_call);

    // restore %mxcsr
    if (sse_save) {
      __ ldmxcsr(mxcsr_save);
    }

    // restore rdi, rsi and rbx,
    __ movptr(rbx, saved_rbx);
    __ movptr(rsi, saved_rsi);
    __ movptr(rdi, saved_rdi);
    __ addptr(rsp, 4*wordSize);

    // return
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movl(Address(rdi, 0 * wordSize), rax);
    __ movl(Address(rdi, 1 * wordSize), rdx);
    __ jmp(exit);

    __ BIND(is_float);
    // interpreter uses xmm0 for return values
    if (UseSSE >= 1) {
      __ movflt(Address(rdi, 0), xmm0);
    } else {
      __ fstp_s(Address(rdi, 0));
    }
    __ jmp(exit);

    __ BIND(is_double);
    // interpreter uses xmm0 for return values
    if (UseSSE >= 2) {
      __ movdbl(Address(rdi, 0), xmm0);
    } else {
      __ fstp_d(Address(rdi, 0));
    }
    __ jmp(exit);

    return start;
  }
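  // The generated blob is published as StubRoutines::_call_stub_entry and is
  // invoked from C++ through the CallStub function pointer type declared in
  // stubRoutines.hpp; its arguments line up with slots 2..9 of the frame
  // layout pictured above (call wrapper, result, result_type, method,
  // entry_point, parameters, parameter_size, thread).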

  //------------------------------------------------------------------------------------------------------------------------
  // Return point for a Java call if there's an exception thrown in Java code.
  // The exception is caught and transformed into a pending exception stored in
  // JavaThread that can be tested from within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case of an exception
  //       crossing an activation frame boundary, that is not the case if the callee
  //       is compiled code => need to setup the rsp.
  //
  // rax,: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_call_stub()!
    const Address thread        (rbp,  9 * wordSize); // same as in generate_call_stub()!
    address start = __ pc();

    // get thread directly
    __ movptr(rcx, thread);
#ifdef ASSERT
    // verify that threads correspond
    { Label L;
      __ get_thread(rbx);
      __ cmpptr(rbx, rcx);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L);
    }
#endif
    // set pending exception
    __ verify_oop(rax);
    __ movptr(Address(rcx, Thread::pending_exception_offset()), rax);
    __ lea(Address(rcx, Thread::exception_file_offset()),
           ExternalAddress((address)__FILE__));
    __ movl(Address(rcx, Thread::exception_line_offset()), __LINE__);
    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }


  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for runtime calls returning with a pending exception.
  // The pending exception check happened in the runtime or native call stub.
  // The pending exception in Thread is converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();
    const Register thread = rcx;

    // other registers used in this stub
    const Register exception_oop = rax;
    const Register handler_addr  = rbx;
    const Register exception_pc  = rdx;

    // Upon entry, the sp points to the return address returning into Java
    // (interpreted or compiled) code; i.e., the return address becomes the
    // throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack but
    // the exception handler will reset the stack pointer -> ignore them.
    // A potential result in registers can be ignored as well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    { Label L;
      __ get_thread(thread);
      __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx,
    __ get_thread(thread);
    __ movptr(exception_pc, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
    __ mov(handler_addr, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ get_thread(thread);
    __ pop(exception_pc);
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    { Label L;
      __ testptr(exception_oop, exception_oop);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // Verify that there is really a valid exception in RAX.
    __ verify_oop(exception_oop);

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ jmp(handler_addr);

    return start;
  }
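  // This blob becomes StubRoutines::forward_exception_entry; other stubs and
  // the native call wrappers jump here (with the return address still on the
  // stack) when they detect a pending exception, so the exception is rethrown
  // at the Java-level handler of the caller.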

  //----------------------------------------------------------------------------------------------------
  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // xchg exists as far back as 8086, lock needed for MP only
  // Stack layout immediately after call:
  //
  // 0 [ret addr ] <--- rsp
  // 1 [  ex     ]
  // 2 [  dest   ]
  //
  // Result:   *dest <- ex, return (old *dest)
  //
  // Note: win32 does not currently use this code

  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ push(rdx);
    Address exchange(rsp, 2 * wordSize);
    Address dest_addr(rsp, 3 * wordSize);
    __ movl(rax, exchange);
    __ movptr(rdx, dest_addr);
    __ xchgl(rax, Address(rdx, 0));
    __ pop(rdx);
    __ ret(0);

    return start;
  }
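  // Note: the operand offsets above account for the push(rdx): the saved rdx
  // occupies slot 0 and the return address slot 1, moving 'ex' to
  // rsp + 2*wordSize and 'dest' to rsp + 3*wordSize. No lock prefix is
  // emitted because xchg with a memory operand is implicitly locked on x86.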

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.


  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls && UseSSE > 0 ) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);      // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code.");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }


  //---------------------------------------------------------------------------
  // Support for void verify_fpu_cntrl_wrd()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // FP control word to our expected state.

  address generate_verify_fpu_cntrl_wrd() {
    StubCodeMark mark(this, "StubRoutines", "verify_spcw");
    address start = __ pc();

    const Address fpu_cntrl_wrd_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      __ push(rax);
      __ subptr(rsp, wordSize);      // allocate a temp location
      __ fnstcw(fpu_cntrl_wrd_save);
      __ movl(rax, fpu_cntrl_wrd_save);
      __ andl(rax, FPU_CNTRL_WRD_MASK);
      ExternalAddress fpu_std(StubRoutines::addr_fpu_cntrl_wrd_std());
      __ cmp32(rax, fpu_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("Floating point control word changed by native JNI code.");

      __ fldcw(fpu_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  //---------------------------------------------------------------------------
  // Wrapper for slow-case handling of double-to-integer conversion
  // d2i or f2i fast case failed either because the value is NaN or because
  // of under/overflow.
  // Input:  FPU TOS: float value
  // Output: rax, (rdx): integer (long) result

  address generate_d2i_wrapper(BasicType t, address fcn) {
    StubCodeMark mark(this, "StubRoutines", "d2i_wrapper");
    address start = __ pc();

    // Capture info about frame layout
    enum layout { FPUState_off         = 0,
                  rbp_off              = FPUStateSizeInWords,
                  rdi_off,
                  rsi_off,
                  rcx_off,
                  rbx_off,
                  saved_argument_off,
                  saved_argument_off2, // 2nd half of double
                  framesize
    };

    assert(FPUStateSizeInWords == 27, "update stack layout");

    // Save outgoing argument to stack across push_FPU_state()
    __ subptr(rsp, wordSize * 2);
    __ fstp_d(Address(rsp, 0));

    // Save CPU & FPU state
    __ push(rbx);
    __ push(rcx);
    __ push(rsi);
    __ push(rdi);
    __ push(rbp);
    __ push_FPU_state();

    // push_FPU_state() resets the FP top of stack
    // Load original double into FP top of stack
    __ fld_d(Address(rsp, saved_argument_off * wordSize));
    // Store double into stack as outgoing argument
    __ subptr(rsp, wordSize*2);
    __ fst_d(Address(rsp, 0));

    // Prepare FPU for doing math in C-land
    __ empty_FPU_stack();
    // Call the C code to massage the double.  Result in EAX
    if (t == T_INT)
      { BLOCK_COMMENT("SharedRuntime::d2i"); }
    else if (t == T_LONG)
      { BLOCK_COMMENT("SharedRuntime::d2l"); }
    __ call_VM_leaf( fcn, 2 );

    // Restore CPU & FPU state
    __ pop_FPU_state();
    __ pop(rbp);
    __ pop(rdi);
    __ pop(rsi);
    __ pop(rcx);
    __ pop(rbx);
    __ addptr(rsp, wordSize * 2);

    __ ret(0);

    return start;
  }
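  // Background on the slow path above: the compiled fast path uses
  // cvttsd2si, which produces the "integer indefinite" value 0x80000000 when
  // the input is NaN or out of range; generated code tests for that pattern
  // and falls into this wrapper so that the C runtime (SharedRuntime::d2i /
  // d2l) can compute the Java-specified result.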

  //---------------------------------------------------------------------------
  // The following routine generates a subroutine to throw an asynchronous
  // UnknownError when an unsafe access gets a fault that could not be
  // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    __ push(0);                  // hole for return address-to-be
    __ pusha();                  // push registers
    Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
    __ movptr(next_pc, rax);     // stuff next address
    __ popa();
    __ ret(0);                   // jump to next address

    return start;
  }
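  // The push(0) above reserves a stack slot at what will become the return
  // address position; handle_unsafe_access() returns the address of the
  // instruction after the faulting one, which is stored into that slot
  // (next_pc addresses the hole once pusha() has pushed the GP registers),
  // so the final ret resumes execution past the fault.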

  //----------------------------------------------------------------------------------------------------
  // Non-destructive plausibility checks for oops

  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    // Incoming arguments on stack after saving rax,:
    //
    // [tos    ]: saved rdx
    // [tos + 1]: saved EFLAGS
    // [tos + 2]: return address
    // [tos + 3]: char* error message
    // [tos + 4]: oop object to verify
    // [tos + 5]: saved rax, - saved by caller and bashed

    Label exit, error;
    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
    __ push(rdx);                                // save rdx
    // make sure object is 'reasonable'
    __ movptr(rax, Address(rsp, 4 * wordSize));  // get object
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit);               // if obj is NULL it is ok

    // Check if the oop is in the right area of memory
    const int oop_mask = Universe::verify_oop_mask();
    const int oop_bits = Universe::verify_oop_bits();
    __ mov(rdx, rax);
    __ andptr(rdx, oop_mask);
    __ cmpptr(rdx, oop_bits);
    __ jcc(Assembler::notZero, error);

    // make sure klass is 'reasonable', which is not zero.
    __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error);              // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, 5 * wordSize));  // get saved rax, back
    __ pop(rdx);                                 // restore rdx
    __ popf();                                   // restore EFLAGS
    __ ret(3 * wordSize);                        // pop arguments

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, 5 * wordSize));  // get saved rax, back
    __ pop(rdx);                                 // get saved rdx back
    __ popf();                                   // get saved EFLAGS off stack -- will be ignored
    __ pusha();                                  // push registers (eip = return address & msg are already pushed)
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
    __ popa();
    __ ret(3 * wordSize);                        // pop arguments
    return start;
  }

  //
  // Generate pre-barrier for array stores
  //
  // Input:
  //   start   -  starting address
  //   count   -  element count
  void gen_write_ref_array_pre_barrier(Register src, Register start, Register count, bool uninitialized_target) {
    assert_different_registers(start, count);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!uninitialized_target) {
          __ pusha();                      // push registers
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
                          start, count);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
#if INCLUDE_ALL_GCS
      case BarrierSet::ShenandoahBarrierSet:
        ShenandoahBarrierSetAssembler::bsasm()->arraycopy_prologue(_masm, uninitialized_target, src, start, count);
        break;
#endif
      default:
        ShouldNotReachHere();

    }
  }
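  // Background: G1's SATB (snapshot-at-the-beginning) barrier must record the
  // oops that are about to be overwritten. When the destination is known to
  // be freshly allocated (uninitialized_target), there are no previous values
  // to record, so the runtime call can be elided entirely.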

  //
  // Generate a post-barrier for an array store
  //
  //     start    -  starting address
  //     count    -  element count
  //
  // The two input registers are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register start, Register count) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    assert_different_registers(start, count);
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        {
          __ pusha();                      // push registers
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post),
                          start, count);
          __ popa();
        }
        break;

      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

          Label L_loop;
          const Register end = count;  // elements count; end == start+count-1
          assert_different_registers(start, end);

          __ lea(end, Address(start, count, Address::times_ptr, -wordSize));
          __ shrptr(start, CardTableModRefBS::card_shift);
          __ shrptr(end,   CardTableModRefBS::card_shift);
          __ subptr(end, start); // end --> count
        __ BIND(L_loop);
          intptr_t disp = (intptr_t) ct->byte_map_base;
          Address cardtable(start, count, Address::times_1, disp);
          __ movb(cardtable, 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
        }
        break;
      case BarrierSet::ModRef:
      case BarrierSet::ShenandoahBarrierSet:
        break;
      default:
        ShouldNotReachHere();

    }
  }


  // Copy 64-byte chunks
  //
  // Inputs:
  //   from        - source array address
  //   to_from     - destination array address - from
  //   qword_count - 8-byte element count
  //
  void xmm_copy_forward(Register from, Register to_from, Register qword_count) {
    assert( UseSSE >= 2, "supported cpu only" );
    Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
    // Copy 64-byte chunks
    __ jmpb(L_copy_64_bytes);
    __ align(OptoLoopAlignment);
  __ BIND(L_copy_64_bytes_loop);

    if (UseUnalignedLoadStores) {
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from,  0));
        __ vmovdqu(Address(from, to_from, Address::times_1,  0), xmm0);
        __ vmovdqu(xmm1, Address(from, 32));
        __ vmovdqu(Address(from, to_from, Address::times_1, 32), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, 0));
        __ movdqu(Address(from, to_from, Address::times_1, 0), xmm0);
        __ movdqu(xmm1, Address(from, 16));
        __ movdqu(Address(from, to_from, Address::times_1, 16), xmm1);
        __ movdqu(xmm2, Address(from, 32));
        __ movdqu(Address(from, to_from, Address::times_1, 32), xmm2);
        __ movdqu(xmm3, Address(from, 48));
        __ movdqu(Address(from, to_from, Address::times_1, 48), xmm3);
      }
    } else {
      __ movq(xmm0, Address(from, 0));
      __ movq(Address(from, to_from, Address::times_1, 0), xmm0);
      __ movq(xmm1, Address(from, 8));
      __ movq(Address(from, to_from, Address::times_1, 8), xmm1);
      __ movq(xmm2, Address(from, 16));
      __ movq(Address(from, to_from, Address::times_1, 16), xmm2);
      __ movq(xmm3, Address(from, 24));
      __ movq(Address(from, to_from, Address::times_1, 24), xmm3);
      __ movq(xmm4, Address(from, 32));
      __ movq(Address(from, to_from, Address::times_1, 32), xmm4);
      __ movq(xmm5, Address(from, 40));
      __ movq(Address(from, to_from, Address::times_1, 40), xmm5);
      __ movq(xmm6, Address(from, 48));
      __ movq(Address(from, to_from, Address::times_1, 48), xmm6);
      __ movq(xmm7, Address(from, 56));
      __ movq(Address(from, to_from, Address::times_1, 56), xmm7);
    }

    __ addl(from, 64);
  __ BIND(L_copy_64_bytes);
    __ subl(qword_count, 8);
    __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);

    if (UseUnalignedLoadStores && (UseAVX >= 2)) {
      // clean upper bits of YMM registers
      __ vpxor(xmm0, xmm0);
      __ vpxor(xmm1, xmm1);
    }
    __ addl(qword_count, 8);
    __ jccb(Assembler::zero, L_exit);
    //
    // length is too short, just copy qwords
    //
  __ BIND(L_copy_8_bytes);
    __ movq(xmm0, Address(from, 0));
    __ movq(Address(from, to_from, Address::times_1), xmm0);
    __ addl(from, 8);
    __ decrement(qword_count);
    __ jcc(Assembler::greater, L_copy_8_bytes);
  __ BIND(L_exit);
  }
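  // Addressing note: the copy helpers receive the destination only as
  // to_from = to - from. Address(from, to_from, times_1, disp) then resolves
  // to from + (to - from) + disp == to + disp, so a single incremented
  // register ('from') drives both the loads and the stores.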

  // Copy 64-byte chunks
  //
  // Inputs:
  //   from        - source array address
  //   to_from     - destination array address - from
  //   qword_count - 8-byte element count
  //
  void mmx_copy_forward(Register from, Register to_from, Register qword_count) {
    assert( VM_Version::supports_mmx(), "supported cpu only" );
    Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
    // Copy 64-byte chunks
    __ jmpb(L_copy_64_bytes);
    __ align(OptoLoopAlignment);
  __ BIND(L_copy_64_bytes_loop);
    __ movq(mmx0, Address(from, 0));
    __ movq(mmx1, Address(from, 8));
    __ movq(mmx2, Address(from, 16));
    __ movq(Address(from, to_from, Address::times_1, 0), mmx0);
    __ movq(mmx3, Address(from, 24));
    __ movq(Address(from, to_from, Address::times_1, 8), mmx1);
    __ movq(mmx4, Address(from, 32));
    __ movq(Address(from, to_from, Address::times_1, 16), mmx2);
    __ movq(mmx5, Address(from, 40));
    __ movq(Address(from, to_from, Address::times_1, 24), mmx3);
    __ movq(mmx6, Address(from, 48));
    __ movq(Address(from, to_from, Address::times_1, 32), mmx4);
    __ movq(mmx7, Address(from, 56));
    __ movq(Address(from, to_from, Address::times_1, 40), mmx5);
    __ movq(Address(from, to_from, Address::times_1, 48), mmx6);
    __ movq(Address(from, to_from, Address::times_1, 56), mmx7);
    __ addptr(from, 64);
  __ BIND(L_copy_64_bytes);
    __ subl(qword_count, 8);
    __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
    __ addl(qword_count, 8);
    __ jccb(Assembler::zero, L_exit);
    //
    // length is too short, just copy qwords
    //
  __ BIND(L_copy_8_bytes);
    __ movq(mmx0, Address(from, 0));
    __ movq(Address(from, to_from, Address::times_1), mmx0);
    __ addptr(from, 8);
    __ decrement(qword_count);
    __ jcc(Assembler::greater, L_copy_8_bytes);
  __ BIND(L_exit);
    __ emms();
  }

  address generate_disjoint_copy(BasicType t, bool aligned,
                                 Address::ScaleFactor sf,
                                 address* entry, const char *name,
                                 bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
    Label L_copy_2_bytes, L_copy_4_bytes, L_copy_64_bytes;

    int shift = Address::times_ptr - sf;

    const Register from     = rsi;  // source array address
    const Register to       = rdi;  // destination array address
    const Register count    = rcx;  // elements count
    const Register to_from  = to;   // (to - from)
    const Register saved_to = rdx;  // saved destination array address

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    __ movptr(from , Address(rsp, 12+ 4));
    __ movptr(to   , Address(rsp, 12+ 8));
    __ movl(count, Address(rsp, 12+ 12));
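    // Stack offset note: enter() pushed rbp and the two pushes above saved
    // rsi/rdi, so 12 bytes of register saves plus the 4-byte return address
    // sit below the caller's arguments; hence the first argument lives at
    // rsp + 12+4.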

    if (entry != NULL) {
      *entry = __ pc(); // Entry point from conjoint arraycopy stub.
      BLOCK_COMMENT("Entry:");
    }

    if (t == T_OBJECT) {
      __ testl(count, count);
      __ jcc(Assembler::zero, L_0_count);
      gen_write_ref_array_pre_barrier(from, to, count, dest_uninitialized);
      __ mov(saved_to, to); // save 'to'
    }

    __ subptr(to, from); // to --> to_from
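    // 'shift' (= Address::times_ptr - sf) converts an element count into a
    // count of 4-byte words, so 2<<shift is the number of elements that fit
    // in 8 bytes: e.g. for T_SHORT (sf == times_2) shift is 1 and the
    // threshold below is 4 elements.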
    __ cmpl(count, 2<<shift);                 // Short arrays (< 8 bytes) copy by element
    __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
    if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
      // align source address at 4 bytes address boundary
      if (t == T_BYTE) {
        // One byte misalignment happens only for byte arrays
        __ testl(from, 1);
        __ jccb(Assembler::zero, L_skip_align1);
        __ movb(rax, Address(from, 0));
        __ movb(Address(from, to_from, Address::times_1, 0), rax);
        __ increment(from);
        __ decrement(count);
      __ BIND(L_skip_align1);
      }
      // Two bytes misalignment happens only for byte and short (char) arrays
      __ testl(from, 2);
      __ jccb(Assembler::zero, L_skip_align2);
      __ movw(rax, Address(from, 0));
      __ movw(Address(from, to_from, Address::times_1, 0), rax);
      __ addptr(from, 2);
      __ subl(count, 1<<(shift-1));
    __ BIND(L_skip_align2);
    }
    if (!VM_Version::supports_mmx()) {
      __ mov(rax, count);      // save 'count'
      __ shrl(count, shift);   // dword count
      __ addptr(to_from, from);// restore 'to'
      __ rep_mov();
      __ subptr(to_from, from);// restore 'to_from'
      __ mov(count, rax);      // restore 'count'
      __ jmpb(L_copy_2_bytes); // all dwords were copied
    } else {
      if (!UseUnalignedLoadStores) {
        // align to 8 bytes, we know we are 4 byte aligned to start
        __ testptr(from, 4);
        __ jccb(Assembler::zero, L_copy_64_bytes);
        __ movl(rax, Address(from, 0));
        __ movl(Address(from, to_from, Address::times_1, 0), rax);
        __ addptr(from, 4);
        __ subl(count, 1<<shift);
      }
    __ BIND(L_copy_64_bytes);
      __ mov(rax, count);
      __ shrl(rax, shift+1); // 8 bytes chunk count
      //
      // Copy 8-byte chunks through MMX registers, 8 per iteration of the loop
      //
      if (UseXMMForArrayCopy) {
        xmm_copy_forward(from, to_from, rax);
      } else {
        mmx_copy_forward(from, to_from, rax);
      }
    }
    // copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(count, 1<<shift);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(from, 0));
    __ movl(Address(from, to_from, Address::times_1, 0), rax);
    if (t == T_BYTE || t == T_SHORT) {
      __ addptr(from, 4);
    __ BIND(L_copy_2_bytes);
      // copy trailing word
      __ testl(count, 1<<(shift-1));
      __ jccb(Assembler::zero, L_copy_byte);
      __ movw(rax, Address(from, 0));
      __ movw(Address(from, to_from, Address::times_1, 0), rax);
      if (t == T_BYTE) {
        __ addptr(from, 2);
      __ BIND(L_copy_byte);
        // copy trailing byte
        __ testl(count, 1);
        __ jccb(Assembler::zero, L_exit);
        __ movb(rax, Address(from, 0));
        __ movb(Address(from, to_from, Address::times_1, 0), rax);
      __ BIND(L_exit);
      } else {
      __ BIND(L_copy_byte);
      }
    } else {
    __ BIND(L_copy_2_bytes);
    }

    if (t == T_OBJECT) {
      __ movl(count, Address(rsp, 12+12)); // reread 'count'
      __ mov(to, saved_to);                // restore 'to'
      gen_write_ref_array_post_barrier(to, count);
    __ BIND(L_0_count);
    }
    inc_copy_counter_np(t);
    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }

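  // generate_fill below mostly just sets up the RuntimeStub frame; the actual
  // fill loop is emitted by MacroAssembler::generate_fill (invoked through
  // the __ macro), which specializes on the element type and the available
  // SSE level.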

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = rdi;  // destination array address
    const Register value = rdx;  // value
    const Register count = rsi;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    __ movptr(to   , Address(rsp, 12+ 4));
    __ movl(value, Address(rsp, 12+ 8));
    __ movl(count, Address(rsp, 12+ 12));

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  address generate_conjoint_copy(BasicType t, bool aligned,
                                 Address::ScaleFactor sf,
                                 address nooverlap_target,
                                 address* entry, const char *name,
                                 bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
    Label L_copy_2_bytes, L_copy_4_bytes, L_copy_8_bytes, L_copy_8_bytes_loop;

    int shift = Address::times_ptr - sf;

    const Register src   = rax;  // source array address
    const Register dst   = rdx;  // destination array address
    const Register from  = rsi;  // source array address
    const Register to    = rdi;  // destination array address
    const Register count = rcx;  // elements count
    const Register end   = rax;  // array end address

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    __ movptr(src  , Address(rsp, 12+ 4));   // from
    __ movptr(dst  , Address(rsp, 12+ 8));   // to
    __ movl2ptr(count, Address(rsp, 12+12)); // count

    if (entry != NULL) {
      *entry = __ pc(); // Entry point from generic arraycopy stub.
      BLOCK_COMMENT("Entry:");
    }

    // nooverlap_target expects arguments in rsi and rdi.
    __ mov(from, src);
    __ mov(to  , dst);

    // arrays overlap test: dispatch to disjoint stub if necessary.
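    // The two conditional jumps below implement
    //   if (dst <= src || dst >= src + count*elem_size) goto nooverlap;
    // i.e. an ascending copy is only unsafe when dst lands strictly inside
    // the source range; in that case we fall through and copy from high
    // addresses down to low.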
    RuntimeAddress nooverlap(nooverlap_target);
    __ cmpptr(dst, src);
    __ lea(end, Address(src, count, sf, 0)); // src + count * elem_size
    __ jump_cc(Assembler::belowEqual, nooverlap);
    __ cmpptr(dst, end);
    __ jump_cc(Assembler::aboveEqual, nooverlap);

    if (t == T_OBJECT) {
      __ testl(count, count);
      __ jcc(Assembler::zero, L_0_count);
      gen_write_ref_array_pre_barrier(src, dst, count, dest_uninitialized);
    }

    // copy from high to low
    __ cmpl(count, 2<<shift);                 // Short arrays (< 8 bytes) copy by element
    __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
    if (t == T_BYTE || t == T_SHORT) {
      // Align the end of destination array at 4 bytes address boundary
      __ lea(end, Address(dst, count, sf, 0));
      if (t == T_BYTE) {
        // One byte misalignment happens only for byte arrays
        __ testl(end, 1);
        __ jccb(Assembler::zero, L_skip_align1);
        __ decrement(count);
        __ movb(rdx, Address(from, count, sf, 0));
        __ movb(Address(to, count, sf, 0), rdx);
      __ BIND(L_skip_align1);
      }
      // Two bytes misalignment happens only for byte and short (char) arrays
      __ testl(end, 2);
      __ jccb(Assembler::zero, L_skip_align2);
      __ subptr(count, 1<<(shift-1));
      __ movw(rdx, Address(from, count, sf, 0));
      __ movw(Address(to, count, sf, 0), rdx);
    __ BIND(L_skip_align2);
      __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
      __ jcc(Assembler::below, L_copy_4_bytes);
    }

    if (!VM_Version::supports_mmx()) {
      __ std();
      __ mov(rax, count); // Save 'count'
      __ mov(rdx, to);    // Save 'to'
      __ lea(rsi, Address(from, count, sf, -4));
      __ lea(rdi, Address(to  , count, sf, -4));
      __ shrptr(count, shift); // dword count
      __ rep_mov();
      __ cld();
      __ mov(count, rax); // restore 'count'
      __ andl(count, (1<<shift)-1); // mask the number of rest elements
      __ movptr(from, Address(rsp, 12+4)); // reread 'from'
      __ mov(to, rdx); // restore 'to'
      __ jmpb(L_copy_2_bytes); // all dwords were copied
    } else {
      // Align to 8 bytes the end of array. It is aligned to 4 bytes already.
      __ testptr(end, 4);
      __ jccb(Assembler::zero, L_copy_8_bytes);
      __ subl(count, 1<<shift);
      __ movl(rdx, Address(from, count, sf, 0));
      __ movl(Address(to, count, sf, 0), rdx);
      __ jmpb(L_copy_8_bytes);

      __ align(OptoLoopAlignment);
      // Move 8 bytes
    __ BIND(L_copy_8_bytes_loop);
      if (UseXMMForArrayCopy) {
        __ movq(xmm0, Address(from, count, sf, 0));
        __ movq(Address(to, count, sf, 0), xmm0);
      } else {
        __ movq(mmx0, Address(from, count, sf, 0));
        __ movq(Address(to, count, sf, 0), mmx0);
      }
    __ BIND(L_copy_8_bytes);
      __ subl(count, 2<<shift);
      __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
      __ addl(count, 2<<shift);
      if (!UseXMMForArrayCopy) {
        __ emms();
      }
    }
  __ BIND(L_copy_4_bytes);
    // copy prefix dword
    __ testl(count, 1<<shift);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rdx, Address(from, count, sf, -4));
    __ movl(Address(to, count, sf, -4), rdx);

    if (t == T_BYTE || t == T_SHORT) {
      __ subl(count, (1<<shift));
    __ BIND(L_copy_2_bytes);
      // copy prefix word
      __ testl(count, 1<<(shift-1));
      __ jccb(Assembler::zero, L_copy_byte);
      __ movw(rdx, Address(from, count, sf, -2));
      __ movw(Address(to, count, sf, -2), rdx);
      if (t == T_BYTE) {
        __ subl(count, 1<<(shift-1));
      __ BIND(L_copy_byte);
        // copy prefix byte
        __ testl(count, 1);
        __ jccb(Assembler::zero, L_exit);
        __ movb(rdx, Address(from, 0));
        __ movb(Address(to, 0), rdx);
      __ BIND(L_exit);
      } else {
      __ BIND(L_copy_byte);
      }
    } else {
    __ BIND(L_copy_2_bytes);
    }
    if (t == T_OBJECT) {
      __ movl2ptr(count, Address(rsp, 12+12)); // reread count
      gen_write_ref_array_post_barrier(to, count);
    __ BIND(L_0_count);
    }
    inc_copy_counter_np(t);
    __ pop(rdi);
    __ pop(rsi);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }


  address generate_disjoint_long_copy(address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_8_bytes, L_copy_8_bytes_loop;
    const Register from    = rax;  // source array address
    const Register to      = rdx;  // destination array address
    const Register count   = rcx;  // elements count
    const Register to_from = rdx;  // (to - from)

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ movptr(from , Address(rsp, 8+0));   // from
    __ movptr(to   , Address(rsp, 8+4));   // to
    __ movl2ptr(count, Address(rsp, 8+8)); // count

    *entry = __ pc(); // Entry point from conjoint arraycopy stub.
    BLOCK_COMMENT("Entry:");

    __ subptr(to, from); // to --> to_from
    if (VM_Version::supports_mmx()) {
      if (UseXMMForArrayCopy) {
        xmm_copy_forward(from, to_from, count);
      } else {
        mmx_copy_forward(from, to_from, count);
      }
    } else {
      __ jmpb(L_copy_8_bytes);
      __ align(OptoLoopAlignment);
    __ BIND(L_copy_8_bytes_loop);
      __ fild_d(Address(from, 0));
      __ fistp_d(Address(from, to_from, Address::times_1));
      __ addptr(from, 8);
    __ BIND(L_copy_8_bytes);
      __ decrement(count);
      __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
    }
    inc_copy_counter_np(T_LONG);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }

  address generate_conjoint_long_copy(address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_8_bytes, L_copy_8_bytes_loop;
    const Register from     = rax;  // source array address
    const Register to       = rdx;  // destination array address
    const Register count    = rcx;  // elements count
    const Register end_from = rax;  // source array end address

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ movptr(from , Address(rsp, 8+0));   // from
    __ movptr(to   , Address(rsp, 8+4));   // to
    __ movl2ptr(count, Address(rsp, 8+8)); // count

    *entry = __ pc(); // Entry point from generic arraycopy stub.
    BLOCK_COMMENT("Entry:");

    // arrays overlap test
    __ cmpptr(to, from);
    RuntimeAddress nooverlap(nooverlap_target);
    __ jump_cc(Assembler::belowEqual, nooverlap);
    __ lea(end_from, Address(from, count, Address::times_8, 0));
    __ cmpptr(to, end_from);
    __ movptr(from, Address(rsp, 8)); // from
    __ jump_cc(Assembler::aboveEqual, nooverlap);

    __ jmpb(L_copy_8_bytes);

    __ align(OptoLoopAlignment);
  __ BIND(L_copy_8_bytes_loop);
    if (VM_Version::supports_mmx()) {
      if (UseXMMForArrayCopy) {
        __ movq(xmm0, Address(from, count, Address::times_8));
        __ movq(Address(to, count, Address::times_8), xmm0);
      } else {
        __ movq(mmx0, Address(from, count, Address::times_8));
        __ movq(Address(to, count, Address::times_8), mmx0);
      }
    } else {
      __ fild_d(Address(from, count, Address::times_8));
      __ fistp_d(Address(to, count, Address::times_8));
    }
  __ BIND(L_copy_8_bytes);
    __ decrement(count);
    __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);

    if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
      __ emms();
    }
    inc_copy_counter_np(T_LONG);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ xorptr(rax, rax); // return 0
    __ ret(0);
    return start;
  }


  // Helper for generating a dynamic type check.
  // The sub_klass must be one of {rbx, rdx, rsi}.
  // The temp is killed.
  void generate_type_check(Register sub_klass,
                           Address& super_check_offset_addr,
                           Address& super_klass_addr,
                           Register temp,
                           Label* L_success, Label* L_failure) {
    BLOCK_COMMENT("type_check:");

    Label L_fallthrough;
#define LOCAL_JCC(assembler_con, label_ptr)                      \
    if (label_ptr != NULL)  __ jcc(assembler_con, *(label_ptr)); \
    else                    __ jcc(assembler_con, L_fallthrough) /*omit semi*/

    // The following is a strange variation of the fast path which requires
    // one less register, because needed values are on the argument stack.
    // __ check_klass_subtype_fast_path(sub_klass, *super_klass*, temp,
    //                                  L_success, L_failure, NULL);
    assert_different_registers(sub_klass, temp);

    int sc_offset = in_bytes(Klass::secondary_super_cache_offset());

    // if the pointers are equal, we are done (e.g., String[] elements)
    __ cmpptr(sub_klass, super_klass_addr);
    LOCAL_JCC(Assembler::equal, L_success);

    // check the supertype display:
    __ movl2ptr(temp, super_check_offset_addr);
    Address super_check_addr(sub_klass, temp, Address::times_1, 0);
    __ movptr(temp, super_check_addr); // load displayed supertype
    __ cmpptr(temp, super_klass_addr); // test the super type
    LOCAL_JCC(Assembler::equal, L_success);

    // if it was a primary super, we can just fail immediately
    __ cmpl(super_check_offset_addr, sc_offset);
    LOCAL_JCC(Assembler::notEqual, L_failure);

    // The repne_scan instruction uses fixed registers, which will get spilled.
    // We happen to know this works best when super_klass is in rax.
    Register super_klass = temp;
    __ movptr(super_klass, super_klass_addr);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg,
                                     L_success, L_failure);

    __ bind(L_fallthrough);

    if (L_success == NULL) { BLOCK_COMMENT("L_success:"); }
    if (L_failure == NULL) { BLOCK_COMMENT("L_failure:"); }

#undef LOCAL_JCC
  }
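  // Background: a klass stores its primary supertypes in a fixed-offset
  // display, so most subtype checks are a single load and compare against
  // super_check_offset. Only when that offset points at the secondary-super
  // cache does the slow path have to scan the secondary supers array (the
  // repne scan mentioned above).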

  //
  //  Generate checkcasting array copy stub
  //
  //  Input:
  //    4(rsp)   - source array address
  //    8(rsp)   - destination array address
  //   12(rsp)   - element count, can be zero
  //   16(rsp)   - size_t ckoff (super_check_offset)
  //   20(rsp)   - oop ckval (super_klass)
  //
  //  Output:
  //    rax, ==  0  -  success
  //    rax, == -1^K - failure, where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name, address* entry, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // register use:
    //  rax, rdx, rcx -- loop control (end_from, end_to, count)
    //  rdi, rsi      -- element access (oop, klass)
    //  rbx,          -- temp
    const Register from       = rax; // source array address
    const Register to         = rdx; // destination array address
    const Register length     = rcx; // elements count
    const Register elem       = rdi; // each oop copied
    const Register elem_klass = rsi; // each elem._klass (sub_klass)
    const Register temp       = rbx; // lone remaining temp

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ push(rsi);
    __ push(rdi);
    __ push(rbx);

    Address   from_arg(rsp, 16+ 4); // from
    Address     to_arg(rsp, 16+ 8); // to
    Address length_arg(rsp, 16+12); // elements count
    Address  ckoff_arg(rsp, 16+16); // super_check_offset
    Address  ckval_arg(rsp, 16+20); // super_klass

    // Load up:
    __ movptr(from,     from_arg);
    __ movptr(to,         to_arg);
    __ movl2ptr(length, length_arg);

    if (entry != NULL) {
      *entry = __ pc(); // Entry point from generic arraycopy stub.
      BLOCK_COMMENT("Entry:");
    }

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, Address::times_ptr, 0);
    Address   end_to_addr(to,   length, Address::times_ptr, 0);

    Register end_from = from; // re-use
    Register end_to   = to;   // re-use
    Register count    = length; // re-use

    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, Address::times_ptr, 0);
    Address   to_element_addr(end_to,   count, Address::times_ptr, 0);
    Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes());

    // Copy from low to high addresses, indexed from the end of each array.
    gen_write_ref_array_pre_barrier(from, to, count, dest_uninitialized);
    __ lea(end_from, end_from_addr);
    __ lea(end_to,   end_to_addr);
    assert(length == count, ""); // else fix next line:
    __ negptr(count); // negate and test the length
    __ jccb(Assembler::notZero, L_load_element);

    // Empty array:  Nothing to do.
    __ xorptr(rax, rax); // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*count, to last element.
    __ align(OptoLoopAlignment);

    __ BIND(L_store_element);
    __ movptr(to_element_addr, elem); // store the oop
    __ increment(count);              // increment the count toward zero
#if INCLUDE_ALL_GCS
    if (UseShenandoahGC) {
      // Shenandoah barrier is too big for 8-bit offsets to work
      __ jcc(Assembler::zero, L_do_card_marks);
    } else
#endif
    __ jccb(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
    __ BIND(L_load_element);
#if INCLUDE_ALL_GCS
    if (UseShenandoahGC) {
      // Needs GC barriers
      __ load_heap_oop(elem, from_element_addr);
    } else
#endif
    __ movptr(elem, from_element_addr); // load the oop
    __ testptr(elem, elem);
#if INCLUDE_ALL_GCS
    if (UseShenandoahGC) {
      // Shenandoah barrier is too big for 8-bit offsets to work
      __ jcc(Assembler::zero, L_store_element);
    } else
#endif
    __ jccb(Assembler::zero, L_store_element);

    // (Could do a trick here: Remember last successful non-null
    // element stored and make a quick oop equality check on it.)

    __ movptr(elem_klass, elem_klass_addr); // query the object klass
    generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp,
                        &L_store_element, NULL);
    // (On fall-through, we have failed the element type check.)
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops.
    // Emit GC store barriers for the oops we have copied (length_arg + count),
    // and report their number to the caller.
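    // Example of the -1^K failure encoding: if 10 elements were requested and
    // a type check fails after 7 stores, K == 7 and rax is set to ~7 == -8;
    // the caller recovers K by complementing rax again, while 0 in rax still
    // means complete success.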
    assert_different_registers(to, count, rax);
    Label L_post_barrier;
    __ addl(count, length_arg); // transfers = (length - remaining)
    __ movl2ptr(rax, count);    // save the value
    __ notptr(rax);             // report (-1^K) to caller (does not affect flags)
    __ jccb(Assembler::notZero, L_post_barrier);
    __ jmp(L_done);             // K == 0, nothing was copied, skip post barrier

    // Come here on success only.
    __ BIND(L_do_card_marks);
    __ xorptr(rax, rax); // return 0 on success
    __ movl2ptr(count, length_arg);

    __ BIND(L_post_barrier);
    __ movptr(to, to_arg); // reload
    gen_write_ref_array_post_barrier(to, count);

    // Common exit point (success or failure).
    __ BIND(L_done);
    __ pop(rbx);
    __ pop(rdi);
    __ pop(rsi);
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  //
  //  Generate 'unsafe' array copy stub
  //  Though just as safe as the other stubs, it takes an unscaled
  //  size_t argument instead of an element count.
  //
  //  Input:
  //    4(rsp)   - source array address
  //    8(rsp)   - destination array address
  //   12(rsp)   - byte count, can be zero
  //
  //  Output:
  //    rax, ==  0  -  success
  //    rax, == -1  -  need to call System.arraycopy
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry,
                               address short_copy_entry,
                               address int_copy_entry,
                               address long_copy_entry) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    const Register from  = rax; // source array address
    const Register to    = rdx; // destination array address
    const Register count = rcx; // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);
    Address  from_arg(rsp, 12+ 4); // from
    Address    to_arg(rsp, 12+ 8); // to
    Address count_arg(rsp, 12+12); // byte count

    // Load up:
    __ movptr(from ,  from_arg);
    __ movptr(to   ,    to_arg);
    __ movl2ptr(count, count_arg);

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    const Register bits = rsi;
    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, count);
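    // OR-ing the two addresses and the byte count gives the coarsest common
    // alignment: a bit is clear in 'bits' only if it is clear in all three
    // inputs, so e.g. (bits & 7) == 0 exactly when source, destination and
    // size are all 8-byte aligned and the qword loop is safe.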

    __ testl(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testl(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testl(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

    __ BIND(L_short_aligned);
    __ shrptr(count, LogBytesPerShort); // size => short_count
    __ movl(count_arg, count);          // update 'count'
    __ jump(RuntimeAddress(short_copy_entry));

    __ BIND(L_int_aligned);
    __ shrptr(count, LogBytesPerInt); // size => int_count
    __ movl(count_arg, count);        // update 'count'
    __ jump(RuntimeAddress(int_copy_entry));

    __ BIND(L_long_aligned);
    __ shrptr(count, LogBytesPerLong); // size => qword_count
    __ movl(count_arg, count);         // update 'count'
    __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it.
    __ pop(rsi);
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }


  // Perform range checks on the proposed arraycopy.
  // Smashes src_pos and dst_pos.  (Uses them up for temps.)
  void arraycopy_range_checks(Register src,
                              Register src_pos,
                              Register dst,
                              Register dst_pos,
                              Address& length,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");
    const Register src_end = src_pos; // source array end position
    const Register dst_end = dst_pos; // destination array end position
    __ addl(src_end, length); // src_pos + length
    __ addl(dst_end, length); // dst_pos + length

    //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
    __ cmpl(src_end, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
    __ cmpl(dst_end, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }


  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //     4(rsp)    -  src oop
  //     8(rsp)    -  src_pos
  //    12(rsp)    -  dst oop
  //    16(rsp)    -  dst_pos
  //    20(rsp)    -  element count
  //
  //  Output:
  //    rax, ==  0  -  success
  //    rax, == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address entry_jbyte_arraycopy,
                                address entry_jshort_arraycopy,
                                address entry_jint_arraycopy,
                                address entry_oop_arraycopy,
                                address entry_jlong_arraycopy,
                                address entry_checkcast_arraycopy) {
    Label L_failed, L_failed_0, L_objArray;

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
    __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    __ push(rsi);
    __ push(rdi);

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    // Input values
    Address SRC     (rsp, 12+ 4);
    Address SRC_POS (rsp, 12+ 8);
    Address DST     (rsp, 12+12);
    Address DST_POS (rsp, 12+16);
    Address LENGTH  (rsp, 12+20);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    const Register src     = rax; // source array oop
    const Register src_pos = rsi;
    const Register dst     = rdx; // destination array oop
    const Register dst_pos = rdi;
    const Register length  = rcx; // transfer count

    //  if (src == NULL) return -1;
    __ movptr(src, SRC); // src oop
    __ testptr(src, src);
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ movl2ptr(src_pos, SRC_POS); // src_pos
    __ testl(src_pos, src_pos);
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ movptr(dst, DST); // dst oop
    __ testptr(dst, dst);
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ movl2ptr(dst_pos, DST_POS); // dst_pos
    __ testl(dst_pos, dst_pos);
    __ jccb(Assembler::negative, L_failed_0);

    //  if (length < 0) return -1;
    __ movl2ptr(length, LENGTH); // length
    __ testl(length, length);
    __ jccb(Assembler::negative, L_failed_0);

    //  if (src->klass() == NULL) return -1;
    Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
    Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
    const Register rcx_src_klass = rcx; // array klass
    __ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes()));

#ifdef ASSERT
    //  assert(src->klass() != NULL);
    BLOCK_COMMENT("assert klasses not null");
    { Label L1, L2;
      __ testptr(rcx_src_klass, rcx_src_klass);
      __ jccb(Assembler::notZero, L2); // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ cmpptr(dst_klass_addr, (int32_t)NULL_WORD);
      __ jccb(Assembler::equal, L1); // this would be broken also
      BLOCK_COMMENT("assert done");
    }
#endif //ASSERT

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //
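    // Illustrative decoding (values are approximate and depend on the exact
    // object header layout): for an int[] on x86_32 the layout helper is
    // roughly (0x3 << 30) | (12 << 16) | (T_INT << 8) | 2 -- typeArray tag,
    // 12-byte header, element type, and log2 of the 4-byte element size.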
1738 // (8) dst_pos + length must not exceed length of dst. 1739 // 1740 1741 const Register src = rax; // source array oop 1742 const Register src_pos = rsi; 1743 const Register dst = rdx; // destination array oop 1744 const Register dst_pos = rdi; 1745 const Register length = rcx; // transfer count 1746 1747 // if (src == NULL) return -1; 1748 __ movptr(src, SRC); // src oop 1749 __ testptr(src, src); 1750 __ jccb(Assembler::zero, L_failed_0); 1751 1752 // if (src_pos < 0) return -1; 1753 __ movl2ptr(src_pos, SRC_POS); // src_pos 1754 __ testl(src_pos, src_pos); 1755 __ jccb(Assembler::negative, L_failed_0); 1756 1757 // if (dst == NULL) return -1; 1758 __ movptr(dst, DST); // dst oop 1759 __ testptr(dst, dst); 1760 __ jccb(Assembler::zero, L_failed_0); 1761 1762 // if (dst_pos < 0) return -1; 1763 __ movl2ptr(dst_pos, DST_POS); // dst_pos 1764 __ testl(dst_pos, dst_pos); 1765 __ jccb(Assembler::negative, L_failed_0); 1766 1767 // if (length < 0) return -1; 1768 __ movl2ptr(length, LENGTH); // length 1769 __ testl(length, length); 1770 __ jccb(Assembler::negative, L_failed_0); 1771 1772 // if (src->klass() == NULL) return -1; 1773 Address src_klass_addr(src, oopDesc::klass_offset_in_bytes()); 1774 Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes()); 1775 const Register rcx_src_klass = rcx; // array klass 1776 __ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes())); 1777 1778 #ifdef ASSERT 1779 // assert(src->klass() != NULL); 1780 BLOCK_COMMENT("assert klasses not null"); 1781 { Label L1, L2; 1782 __ testptr(rcx_src_klass, rcx_src_klass); 1783 __ jccb(Assembler::notZero, L2); // it is broken if klass is NULL 1784 __ bind(L1); 1785 __ stop("broken null klass"); 1786 __ bind(L2); 1787 __ cmpptr(dst_klass_addr, (int32_t)NULL_WORD); 1788 __ jccb(Assembler::equal, L1); // this would be broken also 1789 BLOCK_COMMENT("assert done"); 1790 } 1791 #endif //ASSERT 1792 1793 // Load layout helper (32-bits) 1794 // 1795 // |array_tag| | header_size | element_type | |log2_element_size| 1796 // 32 30 24 16 8 2 0 1797 // 1798 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 1799 // 1800 1801 int lh_offset = in_bytes(Klass::layout_helper_offset()); 1802 Address src_klass_lh_addr(rcx_src_klass, lh_offset); 1803 1804 // Handle objArrays completely differently... 1805 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 1806 __ cmpl(src_klass_lh_addr, objArray_lh); 1807 __ jcc(Assembler::equal, L_objArray); 1808 1809 // if (src->klass() != dst->klass()) return -1; 1810 __ cmpptr(rcx_src_klass, dst_klass_addr); 1811 __ jccb(Assembler::notEqual, L_failed_0); 1812 1813 const Register rcx_lh = rcx; // layout helper 1814 assert(rcx_lh == rcx_src_klass, "known alias"); 1815 __ movl(rcx_lh, src_klass_lh_addr); 1816 1817 // if (!src->is_Array()) return -1; 1818 __ cmpl(rcx_lh, Klass::_lh_neutral_value); 1819 __ jcc(Assembler::greaterEqual, L_failed_0); // signed cmp 1820 1821 // At this point, it is known to be a typeArray (array_tag 0x3). 
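// What the typeArray code below does, as a C-style sketch (informal
// pseudocode under the field layout above, not the exact VM accessors):
//
//   int lh      = src_klass->layout_helper();
//   int header  = (lh >> _lh_header_size_shift) & _lh_header_size_mask; // array header size in bytes
//   int log2_es =  lh & _lh_log2_element_size_mask;                     // log2(element size)
//   from = (address)src + header + (src_pos << log2_es);
//   to   = (address)dst + header + (dst_pos << log2_es);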
1822 #ifdef ASSERT 1823 { Label L; 1824 __ cmpl(rcx_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 1825 __ jcc(Assembler::greaterEqual, L); // signed cmp 1826 __ stop("must be a primitive array"); 1827 __ bind(L); 1828 } 1829 #endif 1830 1831 assert_different_registers(src, src_pos, dst, dst_pos, rcx_lh); 1832 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); 1833 1834 // TypeArrayKlass 1835 // 1836 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 1837 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 1838 // 1839 const Register rsi_offset = rsi; // array offset 1840 const Register src_array = src; // src array offset 1841 const Register dst_array = dst; // dst array offset 1842 const Register rdi_elsize = rdi; // log2 element size 1843 1844 __ mov(rsi_offset, rcx_lh); 1845 __ shrptr(rsi_offset, Klass::_lh_header_size_shift); 1846 __ andptr(rsi_offset, Klass::_lh_header_size_mask); // array_offset 1847 __ addptr(src_array, rsi_offset); // src array offset 1848 __ addptr(dst_array, rsi_offset); // dst array offset 1849 __ andptr(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize 1850 1851 // next registers should be set before the jump to corresponding stub 1852 const Register from = src; // source array address 1853 const Register to = dst; // destination array address 1854 const Register count = rcx; // elements count 1855 // some of them should be duplicated on stack 1856 #define FROM Address(rsp, 12+ 4) 1857 #define TO Address(rsp, 12+ 8) // Not used now 1858 #define COUNT Address(rsp, 12+12) // Only for oop arraycopy 1859 1860 BLOCK_COMMENT("scale indexes to element size"); 1861 __ movl2ptr(rsi, SRC_POS); // src_pos 1862 __ shlptr(rsi); // src_pos << rcx (log2 elsize) 1863 assert(src_array == from, ""); 1864 __ addptr(from, rsi); // from = src_array + SRC_POS << log2 elsize 1865 __ movl2ptr(rdi, DST_POS); // dst_pos 1866 __ shlptr(rdi); // dst_pos << rcx (log2 elsize) 1867 assert(dst_array == to, ""); 1868 __ addptr(to, rdi); // to = dst_array + DST_POS << log2 elsize 1869 __ movptr(FROM, from); // src_addr 1870 __ mov(rdi_elsize, rcx_lh); // log2 elsize 1871 __ movl2ptr(count, LENGTH); // elements count 1872 1873 BLOCK_COMMENT("choose copy loop based on element size"); 1874 __ cmpl(rdi_elsize, 0); 1875 1876 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jbyte_arraycopy)); 1877 __ cmpl(rdi_elsize, LogBytesPerShort); 1878 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jshort_arraycopy)); 1879 __ cmpl(rdi_elsize, LogBytesPerInt); 1880 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jint_arraycopy)); 1881 #ifdef ASSERT 1882 __ cmpl(rdi_elsize, LogBytesPerLong); 1883 __ jccb(Assembler::notEqual, L_failed); 1884 #endif 1885 __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it. 
1886 __ pop(rsi); 1887 __ jump(RuntimeAddress(entry_jlong_arraycopy)); 1888 1889 __ BIND(L_failed); 1890 __ xorptr(rax, rax); 1891 __ notptr(rax); // return -1 1892 __ pop(rdi); 1893 __ pop(rsi); 1894 __ leave(); // required for proper stackwalking of RuntimeStub frame 1895 __ ret(0); 1896 1897 // ObjArrayKlass 1898 __ BIND(L_objArray); 1899 // live at this point: rcx_src_klass, src[_pos], dst[_pos] 1900 1901 Label L_plain_copy, L_checkcast_copy; 1902 // test array classes for subtyping 1903 __ cmpptr(rcx_src_klass, dst_klass_addr); // usual case is exact equality 1904 __ jccb(Assembler::notEqual, L_checkcast_copy); 1905 1906 // Identically typed arrays can be copied without element-wise checks. 1907 assert_different_registers(src, src_pos, dst, dst_pos, rcx_src_klass); 1908 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); 1909 1910 __ BIND(L_plain_copy); 1911 __ movl2ptr(count, LENGTH); // elements count 1912 __ movl2ptr(src_pos, SRC_POS); // reload src_pos 1913 __ lea(from, Address(src, src_pos, Address::times_ptr, 1914 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 1915 __ movl2ptr(dst_pos, DST_POS); // reload dst_pos 1916 __ lea(to, Address(dst, dst_pos, Address::times_ptr, 1917 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 1918 __ movptr(FROM, from); // src_addr 1919 __ movptr(TO, to); // dst_addr 1920 __ movl(COUNT, count); // count 1921 __ jump(RuntimeAddress(entry_oop_arraycopy)); 1922 1923 __ BIND(L_checkcast_copy); 1924 // live at this point: rcx_src_klass, dst[_pos], src[_pos] 1925 { 1926 // Handy offsets: 1927 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 1928 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 1929 1930 Register rsi_dst_klass = rsi; 1931 Register rdi_temp = rdi; 1932 assert(rsi_dst_klass == src_pos, "expected alias w/ src_pos"); 1933 assert(rdi_temp == dst_pos, "expected alias w/ dst_pos"); 1934 Address dst_klass_lh_addr(rsi_dst_klass, lh_offset); 1935 1936 // Before looking at dst.length, make sure dst is also an objArray. 1937 __ movptr(rsi_dst_klass, dst_klass_addr); 1938 __ cmpl(dst_klass_lh_addr, objArray_lh); 1939 __ jccb(Assembler::notEqual, L_failed); 1940 1941 // It is safe to examine both src.length and dst.length. 1942 __ movl2ptr(src_pos, SRC_POS); // reload rsi 1943 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); 1944 // (Now src_pos and dst_pos are killed, but not src and dst.) 1945 1946 // We'll need this temp (don't forget to pop it after the type check). 1947 __ push(rbx); 1948 Register rbx_src_klass = rbx; 1949 1950 __ mov(rbx_src_klass, rcx_src_klass); // spill away from rcx 1951 __ movptr(rsi_dst_klass, dst_klass_addr); 1952 Address super_check_offset_addr(rsi_dst_klass, sco_offset); 1953 Label L_fail_array_check; 1954 generate_type_check(rbx_src_klass, 1955 super_check_offset_addr, dst_klass_addr, 1956 rdi_temp, NULL, &L_fail_array_check); 1957 // (On fall-through, we have passed the array type check.) 1958 __ pop(rbx); 1959 __ jmp(L_plain_copy); 1960 1961 __ BIND(L_fail_array_check); 1962 // Reshuffle arguments so we can call checkcast_arraycopy: 1963 1964 // match initial saves for checkcast_arraycopy 1965 // push(rsi); // already done; see above 1966 // push(rdi); // already done; see above 1967 // push(rbx); // already done; see above 1968 1969 // Marshal outgoing arguments now, freeing registers. 
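// Note on the 16+N displacements used below: the prologue pushed rsi, rdi
// and (just above) rbx on top of the return address, so the incoming stack
// arguments now sit 16 bytes above rsp rather than the 12 used earlier in
// this stub.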
1970 Address from_arg(rsp, 16+ 4); // from 1971 Address to_arg(rsp, 16+ 8); // to 1972 Address length_arg(rsp, 16+12); // elements count 1973 Address ckoff_arg(rsp, 16+16); // super_check_offset 1974 Address ckval_arg(rsp, 16+20); // super_klass 1975 1976 Address SRC_POS_arg(rsp, 16+ 8); 1977 Address DST_POS_arg(rsp, 16+16); 1978 Address LENGTH_arg(rsp, 16+20); 1979 // push rbx, changed the incoming offsets (why not just use rbp,??) 1980 // assert(SRC_POS_arg.disp() == SRC_POS.disp() + 4, ""); 1981 1982 __ movptr(rbx, Address(rsi_dst_klass, ek_offset)); 1983 __ movl2ptr(length, LENGTH_arg); // reload elements count 1984 __ movl2ptr(src_pos, SRC_POS_arg); // reload src_pos 1985 __ movl2ptr(dst_pos, DST_POS_arg); // reload dst_pos 1986 1987 __ movptr(ckval_arg, rbx); // destination element type 1988 __ movl(rbx, Address(rbx, sco_offset)); 1989 __ movl(ckoff_arg, rbx); // corresponding class check offset 1990 1991 __ movl(length_arg, length); // outgoing length argument 1992 1993 __ lea(from, Address(src, src_pos, Address::times_ptr, 1994 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 1995 __ movptr(from_arg, from); 1996 1997 __ lea(to, Address(dst, dst_pos, Address::times_ptr, 1998 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 1999 __ movptr(to_arg, to); 2000 __ jump(RuntimeAddress(entry_checkcast_arraycopy)); 2001 } 2002 2003 return start; 2004 } 2005 2006 void generate_arraycopy_stubs() { 2007 address entry; 2008 address entry_jbyte_arraycopy; 2009 address entry_jshort_arraycopy; 2010 address entry_jint_arraycopy; 2011 address entry_oop_arraycopy; 2012 address entry_jlong_arraycopy; 2013 address entry_checkcast_arraycopy; 2014 2015 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = 2016 generate_disjoint_copy(T_BYTE, true, Address::times_1, &entry, 2017 "arrayof_jbyte_disjoint_arraycopy"); 2018 StubRoutines::_arrayof_jbyte_arraycopy = 2019 generate_conjoint_copy(T_BYTE, true, Address::times_1, entry, 2020 NULL, "arrayof_jbyte_arraycopy"); 2021 StubRoutines::_jbyte_disjoint_arraycopy = 2022 generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry, 2023 "jbyte_disjoint_arraycopy"); 2024 StubRoutines::_jbyte_arraycopy = 2025 generate_conjoint_copy(T_BYTE, false, Address::times_1, entry, 2026 &entry_jbyte_arraycopy, "jbyte_arraycopy"); 2027 2028 StubRoutines::_arrayof_jshort_disjoint_arraycopy = 2029 generate_disjoint_copy(T_SHORT, true, Address::times_2, &entry, 2030 "arrayof_jshort_disjoint_arraycopy"); 2031 StubRoutines::_arrayof_jshort_arraycopy = 2032 generate_conjoint_copy(T_SHORT, true, Address::times_2, entry, 2033 NULL, "arrayof_jshort_arraycopy"); 2034 StubRoutines::_jshort_disjoint_arraycopy = 2035 generate_disjoint_copy(T_SHORT, false, Address::times_2, &entry, 2036 "jshort_disjoint_arraycopy"); 2037 StubRoutines::_jshort_arraycopy = 2038 generate_conjoint_copy(T_SHORT, false, Address::times_2, entry, 2039 &entry_jshort_arraycopy, "jshort_arraycopy"); 2040 2041 // Next arrays are always aligned on 4 bytes at least. 
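// (The 4-byte guarantee holds because jint, oop and jlong elements are each
//  at least 4 bytes wide and element 0 starts at a 4-byte-aligned offset
//  within the array object, so every element address is 4-byte aligned too.)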
2042 StubRoutines::_jint_disjoint_arraycopy = 2043 generate_disjoint_copy(T_INT, true, Address::times_4, &entry, 2044 "jint_disjoint_arraycopy"); 2045 StubRoutines::_jint_arraycopy = 2046 generate_conjoint_copy(T_INT, true, Address::times_4, entry, 2047 &entry_jint_arraycopy, "jint_arraycopy"); 2048 2049 StubRoutines::_oop_disjoint_arraycopy = 2050 generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry, 2051 "oop_disjoint_arraycopy"); 2052 StubRoutines::_oop_arraycopy = 2053 generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, 2054 &entry_oop_arraycopy, "oop_arraycopy"); 2055 2056 StubRoutines::_oop_disjoint_arraycopy_uninit = 2057 generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry, 2058 "oop_disjoint_arraycopy_uninit", 2059 /*dest_uninitialized*/true); 2060 StubRoutines::_oop_arraycopy_uninit = 2061 generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, 2062 NULL, "oop_arraycopy_uninit", 2063 /*dest_uninitialized*/true); 2064 2065 StubRoutines::_jlong_disjoint_arraycopy = 2066 generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy"); 2067 StubRoutines::_jlong_arraycopy = 2068 generate_conjoint_long_copy(entry, &entry_jlong_arraycopy, 2069 "jlong_arraycopy"); 2070 2071 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 2072 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 2073 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 2074 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 2075 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 2076 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 2077 2078 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 2079 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 2080 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 2081 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 2082 2083 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 2084 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 2085 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 2086 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 2087 2088 StubRoutines::_checkcast_arraycopy = 2089 generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 2090 StubRoutines::_checkcast_arraycopy_uninit = 2091 generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, /*dest_uninitialized*/true); 2092 2093 StubRoutines::_unsafe_arraycopy = 2094 generate_unsafe_copy("unsafe_arraycopy", 2095 entry_jbyte_arraycopy, 2096 entry_jshort_arraycopy, 2097 entry_jint_arraycopy, 2098 entry_jlong_arraycopy); 2099 2100 StubRoutines::_generic_arraycopy = 2101 generate_generic_copy("generic_arraycopy", 2102 entry_jbyte_arraycopy, 2103 entry_jshort_arraycopy, 2104 entry_jint_arraycopy, 2105 entry_oop_arraycopy, 2106 entry_jlong_arraycopy, 2107 entry_checkcast_arraycopy); 2108 } 2109 2110 void generate_math_stubs() { 2111 { 2112 StubCodeMark mark(this, "StubRoutines", "log"); 2113 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc(); 2114 2115 __ fld_d(Address(rsp, 4)); 2116 __ flog(); 2117 __ ret(0); 2118 } 2119 { 2120 StubCodeMark mark(this, "StubRoutines", "log10"); 2121 StubRoutines::_intrinsic_log10 = (double 
(*)(double)) __ pc(); 2122 2123 __ fld_d(Address(rsp, 4)); 2124 __ flog10(); 2125 __ ret(0); 2126 } 2127 { 2128 StubCodeMark mark(this, "StubRoutines", "sin"); 2129 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc(); 2130 2131 __ fld_d(Address(rsp, 4)); 2132 __ trigfunc('s'); 2133 __ ret(0); 2134 } 2135 { 2136 StubCodeMark mark(this, "StubRoutines", "cos"); 2137 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc(); 2138 2139 __ fld_d(Address(rsp, 4)); 2140 __ trigfunc('c'); 2141 __ ret(0); 2142 } 2143 { 2144 StubCodeMark mark(this, "StubRoutines", "tan"); 2145 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc(); 2146 2147 __ fld_d(Address(rsp, 4)); 2148 __ trigfunc('t'); 2149 __ ret(0); 2150 } 2151 { 2152 StubCodeMark mark(this, "StubRoutines", "exp"); 2153 StubRoutines::_intrinsic_exp = (double (*)(double)) __ pc(); 2154 2155 __ fld_d(Address(rsp, 4)); 2156 __ exp_with_fallback(0); 2157 __ ret(0); 2158 } 2159 { 2160 StubCodeMark mark(this, "StubRoutines", "pow"); 2161 StubRoutines::_intrinsic_pow = (double (*)(double,double)) __ pc(); 2162 2163 __ fld_d(Address(rsp, 12)); 2164 __ fld_d(Address(rsp, 4)); 2165 __ pow_with_fallback(0); 2166 __ ret(0); 2167 } 2168 } 2169 2170 // AES intrinsic stubs 2171 enum {AESBlockSize = 16}; 2172 2173 address generate_key_shuffle_mask() { 2174 __ align(16); 2175 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 2176 address start = __ pc(); 2177 __ emit_data(0x00010203, relocInfo::none, 0 ); 2178 __ emit_data(0x04050607, relocInfo::none, 0 ); 2179 __ emit_data(0x08090a0b, relocInfo::none, 0 ); 2180 __ emit_data(0x0c0d0e0f, relocInfo::none, 0 ); 2181 return start; 2182 } 2183 2184 // Utility routine for loading a 128-bit key word in little endian format; 2185 // can optionally specify that the shuffle mask is already in an XMM register 2186 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 2187 __ movdqu(xmmdst, Address(key, offset)); 2188 if (xmm_shuf_mask != NULL) { 2189 __ pshufb(xmmdst, xmm_shuf_mask); 2190 } else { 2191 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2192 } 2193 } 2194 2195 // aesenc using specified key+offset; 2196 // can optionally specify that the shuffle mask is already in an XMM register 2197 void aes_enc_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 2198 load_key(xmmtmp, key, offset, xmm_shuf_mask); 2199 __ aesenc(xmmdst, xmmtmp); 2200 } 2201 2202 // aesdec using specified key+offset; 2203 // can optionally specify that the shuffle mask is already in an XMM register 2204 void aes_dec_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 2205 load_key(xmmtmp, key, offset, xmm_shuf_mask); 2206 __ aesdec(xmmdst, xmmtmp); 2207 } 2208 2209 2210 // Arguments: 2211 // 2212 // Inputs: 2213 // c_rarg0 - source byte array address 2214 // c_rarg1 - destination byte array address 2215 // c_rarg2 - K (key) in little endian int array 2216 // 2217 address generate_aescrypt_encryptBlock() { 2218 assert(UseAES, "need AES instructions and misaligned SSE support"); 2219 __ align(CodeEntryAlignment); 2220 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 2221 Label L_doLast; 2222 address start = __ pc(); 2223 2224 const Register from = rdx; // source array address 2225 const Register to = rdx; // destination array address (reuses rdx: 'from' is dead once the input block is loaded) 2226 const Register key = rcx; // key array address 2227 const Register keylen = rax; 2228 const
Address from_param(rbp, 8+0); 2229 const Address to_param (rbp, 8+4); 2230 const Address key_param (rbp, 8+8); 2231 2232 const XMMRegister xmm_result = xmm0; 2233 const XMMRegister xmm_key_shuf_mask = xmm1; 2234 const XMMRegister xmm_temp1 = xmm2; 2235 const XMMRegister xmm_temp2 = xmm3; 2236 const XMMRegister xmm_temp3 = xmm4; 2237 const XMMRegister xmm_temp4 = xmm5; 2238 2239 __ enter(); // required for proper stackwalking of RuntimeStub frame 2240 __ movptr(from, from_param); 2241 __ movptr(key, key_param); 2242 2243 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 2244 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2245 2246 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2247 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 2248 __ movptr(to, to_param); 2249 2250 // For encryption, the java expanded key ordering is just what we need 2251 2252 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 2253 __ pxor(xmm_result, xmm_temp1); 2254 2255 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 2256 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 2257 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 2258 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 2259 2260 __ aesenc(xmm_result, xmm_temp1); 2261 __ aesenc(xmm_result, xmm_temp2); 2262 __ aesenc(xmm_result, xmm_temp3); 2263 __ aesenc(xmm_result, xmm_temp4); 2264 2265 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 2266 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 2267 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 2268 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 2269 2270 __ aesenc(xmm_result, xmm_temp1); 2271 __ aesenc(xmm_result, xmm_temp2); 2272 __ aesenc(xmm_result, xmm_temp3); 2273 __ aesenc(xmm_result, xmm_temp4); 2274 2275 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 2276 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 2277 2278 __ cmpl(keylen, 44); 2279 __ jccb(Assembler::equal, L_doLast); 2280 2281 __ aesenc(xmm_result, xmm_temp1); 2282 __ aesenc(xmm_result, xmm_temp2); 2283 2284 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 2285 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 2286 2287 __ cmpl(keylen, 52); 2288 __ jccb(Assembler::equal, L_doLast); 2289 2290 __ aesenc(xmm_result, xmm_temp1); 2291 __ aesenc(xmm_result, xmm_temp2); 2292 2293 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 2294 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 2295 2296 __ BIND(L_doLast); 2297 __ aesenc(xmm_result, xmm_temp1); 2298 __ aesenclast(xmm_result, xmm_temp2); 2299 __ movdqu(Address(to, 0), xmm_result); // store the result 2300 __ xorptr(rax, rax); // return 0 2301 __ leave(); // required for proper stackwalking of RuntimeStub frame 2302 __ ret(0); 2303 2304 return start; 2305 } 2306 2307 2308 // Arguments: 2309 // 2310 // Inputs: 2311 // c_rarg0 - source byte array address 2312 // c_rarg1 - destination byte array address 2313 // c_rarg2 - K (key) in little endian int array 2314 // 2315 address generate_aescrypt_decryptBlock() { 2316 assert(UseAES, "need AES instructions and misaligned SSE support"); 2317 __ align(CodeEntryAlignment); 2318 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 2319 Label L_doLast; 2320 address start = __ pc(); 2321 2322 const Register from = rdx; // source array address 2323 const Register to = rdx; // destination array address 2324 const Register key = rcx; // key array address 2325 const Register keylen = rax; 
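// Note: 'from' and 'to' deliberately share rdx here, as in the encrypt stub:
// the source pointer is dead once the input block has been loaded into
// xmm_result, so the same register is simply reloaded with the destination.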
2326 const Address from_param(rbp, 8+0); 2327 const Address to_param (rbp, 8+4); 2328 const Address key_param (rbp, 8+8); 2329 2330 const XMMRegister xmm_result = xmm0; 2331 const XMMRegister xmm_key_shuf_mask = xmm1; 2332 const XMMRegister xmm_temp1 = xmm2; 2333 const XMMRegister xmm_temp2 = xmm3; 2334 const XMMRegister xmm_temp3 = xmm4; 2335 const XMMRegister xmm_temp4 = xmm5; 2336 2337 __ enter(); // required for proper stackwalking of RuntimeStub frame 2338 __ movptr(from, from_param); 2339 __ movptr(key, key_param); 2340 2341 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 2342 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2343 2344 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2345 __ movdqu(xmm_result, Address(from, 0)); 2346 __ movptr(to, to_param); 2347 2348 // for decryption java expanded key ordering is rotated one position from what we want 2349 // so we start from 0x10 here and hit 0x00 last 2350 // we don't know if the key is aligned, hence not using load-execute form 2351 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 2352 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 2353 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 2354 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 2355 2356 __ pxor (xmm_result, xmm_temp1); 2357 __ aesdec(xmm_result, xmm_temp2); 2358 __ aesdec(xmm_result, xmm_temp3); 2359 __ aesdec(xmm_result, xmm_temp4); 2360 2361 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 2362 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 2363 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 2364 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 2365 2366 __ aesdec(xmm_result, xmm_temp1); 2367 __ aesdec(xmm_result, xmm_temp2); 2368 __ aesdec(xmm_result, xmm_temp3); 2369 __ aesdec(xmm_result, xmm_temp4); 2370 2371 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 2372 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 2373 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 2374 2375 __ cmpl(keylen, 44); 2376 __ jccb(Assembler::equal, L_doLast); 2377 2378 __ aesdec(xmm_result, xmm_temp1); 2379 __ aesdec(xmm_result, xmm_temp2); 2380 2381 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 2382 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 2383 2384 __ cmpl(keylen, 52); 2385 __ jccb(Assembler::equal, L_doLast); 2386 2387 __ aesdec(xmm_result, xmm_temp1); 2388 __ aesdec(xmm_result, xmm_temp2); 2389 2390 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 2391 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 2392 2393 __ BIND(L_doLast); 2394 __ aesdec(xmm_result, xmm_temp1); 2395 __ aesdec(xmm_result, xmm_temp2); 2396 2397 // for decryption the aesdeclast operation is always on key+0x00 2398 __ aesdeclast(xmm_result, xmm_temp3); 2399 __ movdqu(Address(to, 0), xmm_result); // store the result 2400 __ xorptr(rax, rax); // return 0 2401 __ leave(); // required for proper stackwalking of RuntimeStub frame 2402 __ ret(0); 2403 2404 return start; 2405 } 2406 2407 void handleSOERegisters(bool saving) { 2408 const int saveFrameSizeInBytes = 4 * wordSize; 2409 const Address saved_rbx (rbp, -3 * wordSize); 2410 const Address saved_rsi (rbp, -2 * wordSize); 2411 const Address saved_rdi (rbp, -1 * wordSize); 2412 2413 if (saving) { 2414 __ subptr(rsp, saveFrameSizeInBytes); 2415 __ movptr(saved_rsi, rsi); 2416 __ movptr(saved_rdi, rdi); 2417 __ movptr(saved_rbx, rbx); 2418 } else { 2419 // restoring 2420 __ movptr(rsi, saved_rsi); 2421 __ 
movptr(rdi, saved_rdi); 2422 __ movptr(rbx, saved_rbx); 2423 } 2424 } 2425 2426 // Arguments: 2427 // 2428 // Inputs: 2429 // c_rarg0 - source byte array address 2430 // c_rarg1 - destination byte array address 2431 // c_rarg2 - K (key) in little endian int array 2432 // c_rarg3 - r vector byte array address 2433 // c_rarg4 - input length 2434 // 2435 // Output: 2436 // rax - input length 2437 // 2438 address generate_cipherBlockChaining_encryptAESCrypt() { 2439 assert(UseAES, "need AES instructions and misaligned SSE support"); 2440 __ align(CodeEntryAlignment); 2441 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 2442 address start = __ pc(); 2443 2444 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 2445 const Register from = rsi; // source array address 2446 const Register to = rdx; // destination array address 2447 const Register key = rcx; // key array address 2448 const Register rvec = rdi; // r byte array initialized from initvector array address 2449 // and left with the results of the last encryption block 2450 const Register len_reg = rbx; // src len (must be multiple of blocksize 16) 2451 const Register pos = rax; 2452 2453 // xmm register assignments for the loops below 2454 const XMMRegister xmm_result = xmm0; 2455 const XMMRegister xmm_temp = xmm1; 2456 // first 6 keys preloaded into xmm2-xmm7 2457 const int XMM_REG_NUM_KEY_FIRST = 2; 2458 const int XMM_REG_NUM_KEY_LAST = 7; 2459 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 2460 2461 __ enter(); // required for proper stackwalking of RuntimeStub frame 2462 handleSOERegisters(true /*saving*/); 2463 2464 // load registers from incoming parameters 2465 const Address from_param(rbp, 8+0); 2466 const Address to_param (rbp, 8+4); 2467 const Address key_param (rbp, 8+8); 2468 const Address rvec_param (rbp, 8+12); 2469 const Address len_param (rbp, 8+16); 2470 __ movptr(from , from_param); 2471 __ movptr(to , to_param); 2472 __ movptr(key , key_param); 2473 __ movptr(rvec , rvec_param); 2474 __ movptr(len_reg , len_param); 2475 2476 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 2477 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2478 // load up xmm regs 2 thru 7 with keys 0-5 2479 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2480 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 2481 offset += 0x10; 2482 } 2483 2484 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 2485 2486 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 2487 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2488 __ cmpl(rax, 44); 2489 __ jcc(Assembler::notEqual, L_key_192_256); 2490 2491 // 128 bit code follows here 2492 __ movl(pos, 0); 2493 __ align(OptoLoopAlignment); 2494 __ BIND(L_loopTop_128); 2495 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 2496 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2497 2498 __ pxor (xmm_result, xmm_key0); // do the aes rounds 2499 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2500 __ aesenc(xmm_result, as_XMMRegister(rnum)); 2501 } 2502 for (int key_offset = 0x60; key_offset <= 0x90; key_offset += 0x10) { 2503 aes_enc_key(xmm_result, 
xmm_temp, key, key_offset); 2504 } 2505 load_key(xmm_temp, key, 0xa0); 2506 __ aesenclast(xmm_result, xmm_temp); 2507 2508 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2509 // no need to store r to memory until we exit 2510 __ addptr(pos, AESBlockSize); 2511 __ subptr(len_reg, AESBlockSize); 2512 __ jcc(Assembler::notEqual, L_loopTop_128); 2513 2514 __ BIND(L_exit); 2515 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 2516 2517 handleSOERegisters(false /*restoring*/); 2518 __ movptr(rax, len_param); // return length 2519 __ leave(); // required for proper stackwalking of RuntimeStub frame 2520 __ ret(0); 2521 2522 __ BIND(L_key_192_256); 2523 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 2524 __ cmpl(rax, 52); 2525 __ jcc(Assembler::notEqual, L_key_256); 2526 2527 // 192-bit code follows here (could be changed to use more xmm registers) 2528 __ movl(pos, 0); 2529 __ align(OptoLoopAlignment); 2530 __ BIND(L_loopTop_192); 2531 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 2532 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2533 2534 __ pxor (xmm_result, xmm_key0); // do the aes rounds 2535 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2536 __ aesenc(xmm_result, as_XMMRegister(rnum)); 2537 } 2538 for (int key_offset = 0x60; key_offset <= 0xb0; key_offset += 0x10) { 2539 aes_enc_key(xmm_result, xmm_temp, key, key_offset); 2540 } 2541 load_key(xmm_temp, key, 0xc0); 2542 __ aesenclast(xmm_result, xmm_temp); 2543 2544 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2545 // no need to store r to memory until we exit 2546 __ addptr(pos, AESBlockSize); 2547 __ subptr(len_reg, AESBlockSize); 2548 __ jcc(Assembler::notEqual, L_loopTop_192); 2549 __ jmp(L_exit); 2550 2551 __ BIND(L_key_256); 2552 // 256-bit code follows here (could be changed to use more xmm registers) 2553 __ movl(pos, 0); 2554 __ align(OptoLoopAlignment); 2555 __ BIND(L_loopTop_256); 2556 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 2557 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2558 2559 __ pxor (xmm_result, xmm_key0); // do the aes rounds 2560 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2561 __ aesenc(xmm_result, as_XMMRegister(rnum)); 2562 } 2563 for (int key_offset = 0x60; key_offset <= 0xd0; key_offset += 0x10) { 2564 aes_enc_key(xmm_result, xmm_temp, key, key_offset); 2565 } 2566 load_key(xmm_temp, key, 0xe0); 2567 __ aesenclast(xmm_result, xmm_temp); 2568 2569 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2570 // no need to store r to memory until we exit 2571 __ addptr(pos, AESBlockSize); 2572 __ subptr(len_reg, AESBlockSize); 2573 __ jcc(Assembler::notEqual, L_loopTop_256); 2574 __ jmp(L_exit); 2575 2576 return start; 2577 } 2578 2579 2580 // CBC AES Decryption. 2581 // In the 32-bit stub, because of the lack of registers, we do not try to parallelize 4 blocks at a time.
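// (CBC decryption is parallelizable in principle, since each plaintext block
//  depends only on two ciphertext blocks: P[i] = D(K, C[i]) ^ C[i-1]. The
//  64-bit stub exploits that; this one decrypts a single block per iteration.)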
2582 // 2583 // Arguments: 2584 // 2585 // Inputs: 2586 // c_rarg0 - source byte array address 2587 // c_rarg1 - destination byte array address 2588 // c_rarg2 - K (key) in little endian int array 2589 // c_rarg3 - r vector byte array address 2590 // c_rarg4 - input length 2591 // 2592 // Output: 2593 // rax - input length 2594 // 2595 2596 address generate_cipherBlockChaining_decryptAESCrypt() { 2597 assert(UseAES, "need AES instructions and misaligned SSE support"); 2598 __ align(CodeEntryAlignment); 2599 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 2600 address start = __ pc(); 2601 2602 Label L_exit, L_key_192_256, L_key_256; 2603 Label L_singleBlock_loopTop_128; 2604 Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256; 2605 const Register from = rsi; // source array address 2606 const Register to = rdx; // destination array address 2607 const Register key = rcx; // key array address 2608 const Register rvec = rdi; // r byte array initialized from initvector array address 2609 // and left with the last input cipher block (the next r vector) 2610 const Register len_reg = rbx; // src len (must be multiple of blocksize 16) 2611 const Register pos = rax; 2612 2613 // xmm register assignments for the loops below 2614 const XMMRegister xmm_result = xmm0; 2615 const XMMRegister xmm_temp = xmm1; 2616 // first 6 keys preloaded into xmm2-xmm7 2617 const int XMM_REG_NUM_KEY_FIRST = 2; 2618 const int XMM_REG_NUM_KEY_LAST = 7; 2619 const int FIRST_NON_REG_KEY_offset = 0x70; 2620 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 2621 2622 __ enter(); // required for proper stackwalking of RuntimeStub frame 2623 handleSOERegisters(true /*saving*/); 2624 2625 // load registers from incoming parameters 2626 const Address from_param(rbp, 8+0); 2627 const Address to_param (rbp, 8+4); 2628 const Address key_param (rbp, 8+8); 2629 const Address rvec_param (rbp, 8+12); 2630 const Address len_param (rbp, 8+16); 2631 __ movptr(from , from_param); 2632 __ movptr(to , to_param); 2633 __ movptr(key , key_param); 2634 __ movptr(rvec , rvec_param); 2635 __ movptr(len_reg , len_param); 2636 2637 // the java expanded key ordering is rotated one position from what we want 2638 // so we start from 0x10 here and hit 0x00 last 2639 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 2640 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2641 // load up xmm regs 2 thru 7 with the first 6 keys (offsets 0x10 thru 0x60) 2642 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2643 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 2644 offset += 0x10; 2645 } 2646 2647 // inside here, use the rvec register to point to the previous cipher block 2648 // with which we xor at the end of each newly decrypted block 2649 const Register prev_block_cipher_ptr = rvec; 2650 2651 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array: 44=128, 52=192, 60=256) 2652 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2653 __ cmpl(rax, 44); 2654 __ jcc(Assembler::notEqual, L_key_192_256); 2655 2656 2657 // 128-bit code follows here (one block per loop iteration) 2658 __ movl(pos, 0); 2659 __ align(OptoLoopAlignment); 2660 __ BIND(L_singleBlock_loopTop_128); 2661 __ cmpptr(len_reg, 0); // any blocks left??
2662 __ jcc(Assembler::equal, L_exit); 2663 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 2664 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 2665 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2666 __ aesdec(xmm_result, as_XMMRegister(rnum)); 2667 } 2668 for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xa0; key_offset += 0x10) { // 128-bit runs up to key offset a0 2669 aes_dec_key(xmm_result, xmm_temp, key, key_offset); 2670 } 2671 load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0 2672 __ aesdeclast(xmm_result, xmm_temp); 2673 __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00)); 2674 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2675 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2676 // no need to store r to memory until we exit 2677 __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr 2678 __ addptr(pos, AESBlockSize); 2679 __ subptr(len_reg, AESBlockSize); 2680 __ jmp(L_singleBlock_loopTop_128); 2681 2682 2683 __ BIND(L_exit); 2684 __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00)); 2685 __ movptr(rvec , rvec_param); // restore this since used in loop 2686 __ movdqu(Address(rvec, 0), xmm_temp); // final value of r stored in rvec of CipherBlockChaining object 2687 handleSOERegisters(false /*restoring*/); 2688 __ movptr(rax, len_param); // return length 2689 __ leave(); // required for proper stackwalking of RuntimeStub frame 2690 __ ret(0); 2691 2692 2693 __ BIND(L_key_192_256); 2694 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 2695 __ cmpl(rax, 52); 2696 __ jcc(Assembler::notEqual, L_key_256); 2697 2698 // 192-bit code follows here (could be optimized to use parallelism) 2699 __ movl(pos, 0); 2700 __ align(OptoLoopAlignment); 2701 __ BIND(L_singleBlock_loopTop_192); 2702 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 2703 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 2704 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2705 __ aesdec(xmm_result, as_XMMRegister(rnum)); 2706 } 2707 for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xc0; key_offset += 0x10) { // 192-bit runs up to key offset c0 2708 aes_dec_key(xmm_result, xmm_temp, key, key_offset); 2709 } 2710 load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0 2711 __ aesdeclast(xmm_result, xmm_temp); 2712 __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00)); 2713 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2714 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2715 // no need to store r to memory until we exit 2716 __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr 2717 __ addptr(pos, AESBlockSize); 2718 __ subptr(len_reg, AESBlockSize); 2719 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_192); 2720 __ jmp(L_exit); 2721 2722 __ BIND(L_key_256); 2723 // 256-bit code follows here (could be optimized to use parallelism) 2724 __ movl(pos, 0); 2725 __ align(OptoLoopAlignment); 2726 __ BIND(L_singleBlock_loopTop_256); 2727 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 2728 __ pxor (xmm_result, xmm_key_first); // do 
the aes dec rounds 2729 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 2730 __ aesdec(xmm_result, as_XMMRegister(rnum)); 2731 } 2732 for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xe0; key_offset += 0x10) { // 256-bit runs up to key offset e0 2733 aes_dec_key(xmm_result, xmm_temp, key, key_offset); 2734 } 2735 load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0 2736 __ aesdeclast(xmm_result, xmm_temp); 2737 __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00)); 2738 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 2739 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 2740 // no need to store r to memory until we exit 2741 __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr 2742 __ addptr(pos, AESBlockSize); 2743 __ subptr(len_reg, AESBlockSize); 2744 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_256); 2745 __ jmp(L_exit); 2746 2747 return start; 2748 } 2749 2750 // byte swap x86 long 2751 address generate_ghash_long_swap_mask() { 2752 __ align(CodeEntryAlignment); 2753 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 2754 address start = __ pc(); 2755 __ emit_data(0x0b0a0908, relocInfo::none, 0); 2756 __ emit_data(0x0f0e0d0c, relocInfo::none, 0); 2757 __ emit_data(0x03020100, relocInfo::none, 0); 2758 __ emit_data(0x07060504, relocInfo::none, 0); 2759 2760 return start; 2761 } 2762 2763 // byte swap x86 byte array 2764 address generate_ghash_byte_swap_mask() { 2765 __ align(CodeEntryAlignment); 2766 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 2767 address start = __ pc(); 2768 __ emit_data(0x0c0d0e0f, relocInfo::none, 0); 2769 __ emit_data(0x08090a0b, relocInfo::none, 0); 2770 __ emit_data(0x04050607, relocInfo::none, 0); 2771 __ emit_data(0x00010203, relocInfo::none, 0); 2772 return start; 2773 } 2774 2775 /* Single and multi-block ghash operations */ 2776 address generate_ghash_processBlocks() { 2777 assert(UseGHASHIntrinsics, "need GHASH intrinsics and CLMUL support"); 2778 __ align(CodeEntryAlignment); 2779 Label L_ghash_loop, L_exit; 2780 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 2781 address start = __ pc(); 2782 2783 const Register state = rdi; 2784 const Register subkeyH = rsi; 2785 const Register data = rdx; 2786 const Register blocks = rcx; 2787 2788 const Address state_param(rbp, 8+0); 2789 const Address subkeyH_param(rbp, 8+4); 2790 const Address data_param(rbp, 8+8); 2791 const Address blocks_param(rbp, 8+12); 2792 2793 const XMMRegister xmm_temp0 = xmm0; 2794 const XMMRegister xmm_temp1 = xmm1; 2795 const XMMRegister xmm_temp2 = xmm2; 2796 const XMMRegister xmm_temp3 = xmm3; 2797 const XMMRegister xmm_temp4 = xmm4; 2798 const XMMRegister xmm_temp5 = xmm5; 2799 const XMMRegister xmm_temp6 = xmm6; 2800 const XMMRegister xmm_temp7 = xmm7; 2801 2802 __ enter(); 2803 handleSOERegisters(true); // Save registers 2804 2805 __ movptr(state, state_param); 2806 __ movptr(subkeyH, subkeyH_param); 2807 __ movptr(data, data_param); 2808 __ movptr(blocks, blocks_param); 2809 2810 __ movdqu(xmm_temp0, Address(state, 0)); 2811 __ pshufb(xmm_temp0, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr())); 2812 2813 __ movdqu(xmm_temp1, Address(subkeyH, 0)); 2814 __ pshufb(xmm_temp1, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr())); 2815 2816 __ BIND(L_ghash_loop); 2817 __ movdqu(xmm_temp2, Address(data, 0)); 
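// Each trip through L_ghash_loop computes state = (state ^ data[i]) * H in
// GF(2^128); the pshufb below byte-reverses the freshly loaded block into the
// bit-reflected representation that GHASH arithmetic uses.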
2818 __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr())); 2819 2820 __ pxor(xmm_temp0, xmm_temp2); 2821 2822 // 2823 // Multiply with the hash key 2824 // 2825 __ movdqu(xmm_temp3, xmm_temp0); 2826 __ pclmulqdq(xmm_temp3, xmm_temp1, 0); // xmm3 holds a0*b0 2827 __ movdqu(xmm_temp4, xmm_temp0); 2828 __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1 2829 2830 __ movdqu(xmm_temp5, xmm_temp0); 2831 __ pclmulqdq(xmm_temp5, xmm_temp1, 1); // xmm5 holds a1*b0 2832 __ movdqu(xmm_temp6, xmm_temp0); 2833 __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1 2834 2835 __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0 2836 2837 __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5 2838 __ psrldq(xmm_temp4, 8); // shift xmm4 right by 64 bits 2839 __ pslldq(xmm_temp5, 8); // shift xmm5 left by 64 bits 2840 __ pxor(xmm_temp3, xmm_temp5); 2841 __ pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result 2842 // of the carry-less multiplication of 2843 // xmm0 by xmm1. 2844 2845 // We shift the result of the multiplication by one bit position 2846 // to the left to compensate for the fact that the bits are reversed. 2847 __ movdqu(xmm_temp7, xmm_temp3); 2848 __ movdqu(xmm_temp4, xmm_temp6); 2849 __ pslld (xmm_temp3, 1); 2850 __ pslld(xmm_temp6, 1); 2851 __ psrld(xmm_temp7, 31); 2852 __ psrld(xmm_temp4, 31); 2853 __ movdqu(xmm_temp5, xmm_temp7); 2854 __ pslldq(xmm_temp4, 4); 2855 __ pslldq(xmm_temp7, 4); 2856 __ psrldq(xmm_temp5, 12); 2857 __ por(xmm_temp3, xmm_temp7); 2858 __ por(xmm_temp6, xmm_temp4); 2859 __ por(xmm_temp6, xmm_temp5); 2860 2861 // 2862 // First phase of the reduction 2863 // 2864 // Move xmm3 into xmm4, xmm5, xmm7 in order to perform the shifts 2865 // independently. 2866 __ movdqu(xmm_temp7, xmm_temp3); 2867 __ movdqu(xmm_temp4, xmm_temp3); 2868 __ movdqu(xmm_temp5, xmm_temp3); 2869 __ pslld(xmm_temp7, 31); // packed left shift by 31 2870 __ pslld(xmm_temp4, 30); // packed left shift by 30 2871 __ pslld(xmm_temp5, 25); // packed left shift by 25 2872 __ pxor(xmm_temp7, xmm_temp4); // xor the shifted versions 2873 __ pxor(xmm_temp7, xmm_temp5); 2874 __ movdqu(xmm_temp4, xmm_temp7); 2875 __ pslldq(xmm_temp7, 12); 2876 __ psrldq(xmm_temp4, 4); 2877 __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete 2878 2879 // 2880 // Second phase of the reduction 2881 // 2882 // Make 3 copies of xmm3 in xmm2, xmm5, xmm7 for doing these 2883 // shift operations.
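// (Background on the shift counts used by the two reduction phases: the
//  256-bit product is reduced modulo the GHASH polynomial
//      g(x) = x^128 + x^7 + x^2 + x + 1,
//  whose low terms x^7 + x^2 + x give rise to the 31/30/25 left shifts above
//  and the 1/2/7 right shifts below, following the usual carry-less
//  multiplication reduction.)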
2884 __ movdqu(xmm_temp2, xmm_temp3); 2885 __ movdqu(xmm_temp7, xmm_temp3); 2886 __ movdqu(xmm_temp5, xmm_temp3); 2887 __ psrld(xmm_temp2, 1); // packed right shift by 1 2888 __ psrld(xmm_temp7, 2); // packed right shift by 2 2889 __ psrld(xmm_temp5, 7); // packed right shift by 7 2890 __ pxor(xmm_temp2, xmm_temp7); // xor the shifted versions 2891 __ pxor(xmm_temp2, xmm_temp5); 2892 __ pxor(xmm_temp2, xmm_temp4); 2893 __ pxor(xmm_temp3, xmm_temp2); 2894 __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6 2895 2896 __ decrement(blocks); 2897 __ jcc(Assembler::zero, L_exit); 2898 __ movdqu(xmm_temp0, xmm_temp6); 2899 __ addptr(data, 16); 2900 __ jmp(L_ghash_loop); 2901 2902 __ BIND(L_exit); 2903 // Byte swap 16-byte result 2904 __ pshufb(xmm_temp6, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr())); 2905 __ movdqu(Address(state, 0), xmm_temp6); // store the result 2906 2907 handleSOERegisters(false); // restore registers 2908 __ leave(); 2909 __ ret(0); 2910 return start; 2911 } 2912 2913 /** 2914 * Arguments: 2915 * 2916 * Inputs: 2917 * rsp(4) - int crc 2918 * rsp(8) - byte* buf 2919 * rsp(12) - int length 2920 * 2921 * Output: 2922 * rax - int crc result 2923 */ 2924 address generate_updateBytesCRC32() { 2925 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); 2926 2927 __ align(CodeEntryAlignment); 2928 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 2929 2930 address start = __ pc(); 2931 2932 const Register crc = rdx; // crc 2933 const Register buf = rsi; // source java byte array address 2934 const Register len = rcx; // length 2935 const Register table = rdi; // crc_table address (reuse register) 2936 const Register tmp = rbx; 2937 assert_different_registers(crc, buf, len, table, tmp, rax); 2938 2939 BLOCK_COMMENT("Entry:"); 2940 __ enter(); // required for proper stackwalking of RuntimeStub frame 2941 __ push(rsi); 2942 __ push(rdi); 2943 __ push(rbx); 2944 2945 Address crc_arg(rbp, 8 + 0); 2946 Address buf_arg(rbp, 8 + 4); 2947 Address len_arg(rbp, 8 + 8); 2948 2949 // Load up: 2950 __ movl(crc, crc_arg); 2951 __ movptr(buf, buf_arg); 2952 __ movl(len, len_arg); 2953 2954 __ kernel_crc32(crc, buf, len, table, tmp); 2955 2956 __ movl(rax, crc); 2957 __ pop(rbx); 2958 __ pop(rdi); 2959 __ pop(rsi); 2960 __ leave(); // required for proper stackwalking of RuntimeStub frame 2961 __ ret(0); 2962 2963 return start; 2964 } 2965 2966 // Safefetch stubs. 2967 void generate_safefetch(const char* name, int size, address* entry, 2968 address* fault_pc, address* continuation_pc) { 2969 // safefetch signatures: 2970 // int SafeFetch32(int* adr, int errValue); 2971 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 2972 2973 StubCodeMark mark(this, "StubRoutines", name); 2974 2975 // Entry point, pc or function descriptor. 2976 *entry = __ pc(); 2977 2978 __ movl(rax, Address(rsp, 0x8)); // rax = errValue (the default result) 2979 __ movl(rcx, Address(rsp, 0x4)); // rcx = adr 2980 // Load *adr into eax, may fault. 2981 *fault_pc = __ pc(); 2982 switch (size) { 2983 case 4: 2984 // int32_t 2985 __ movl(rax, Address(rcx, 0)); 2986 break; 2987 case 8: 2988 // int64_t 2989 Unimplemented(); 2990 break; 2991 default: 2992 ShouldNotReachHere(); 2993 } 2994 2995 // Return errValue or *adr. 2996 *continuation_pc = __ pc(); 2997 __ ret(0); 2998 } 2999 3000 public: 3001 // Information about frame layout at time of blocking runtime call.
3002 // Note that we only have to preserve callee-saved registers since 3003 // the compilers are responsible for supplying a continuation point 3004 // if they expect all registers to be preserved. 3005 enum layout { 3006 thread_off, // last_java_sp 3007 arg1_off, 3008 arg2_off, 3009 rbp_off, // callee saved register 3010 ret_pc, 3011 framesize 3012 }; 3013 3014 private: 3015 3016 #undef __ 3017 #define __ masm-> 3018 3019 //------------------------------------------------------------------------------------------------------------------------ 3020 // Continuation point for throwing of implicit exceptions that are not handled in 3021 // the current activation. Fabricates an exception oop and initiates normal 3022 // exception dispatching in this frame. 3023 // 3024 // Previously the compiler (c2) allowed for callee save registers on Java calls. 3025 // This is no longer true after adapter frames were removed but could possibly 3026 // be brought back in the future if the interpreter code was reworked and it 3027 // was deemed worthwhile. The comment below was left to describe what must 3028 // happen here if callee saves were resurrected. As it stands now this stub 3029 // could actually be a vanilla BufferBlob and have no oopMap at all. 3030 // Since it doesn't make much difference we've chosen to leave it the 3031 // way it was in the callee save days and keep the comment. 3032 3033 // If we need to preserve callee-saved values we need a callee-saved oop map and 3034 // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs. 3035 // If the compiler needs all registers to be preserved between the fault 3036 // point and the exception handler then it must assume responsibility for that in 3037 // AbstractCompiler::continuation_for_implicit_null_exception or 3038 // continuation_for_implicit_division_by_zero_exception. All other implicit 3039 // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are 3040 // either at call sites or otherwise assume that stack unwinding will be initiated, 3041 // so caller saved registers were assumed volatile in the compiler. 3042 address generate_throw_exception(const char* name, address runtime_entry, 3043 Register arg1 = noreg, Register arg2 = noreg) { 3044 3045 int insts_size = 256; 3046 int locs_size = 32; 3047 3048 CodeBuffer code(name, insts_size, locs_size); 3049 OopMapSet* oop_maps = new OopMapSet(); 3050 MacroAssembler* masm = new MacroAssembler(&code); 3051 3052 address start = __ pc(); 3053 3054 // This is an inlined and slightly modified version of call_VM 3055 // which has the ability to fetch the return PC out of 3056 // thread-local storage and also sets up last_Java_sp slightly 3057 // differently than the real call_VM 3058 Register java_thread = rbx; 3059 __ get_thread(java_thread); 3060 3061 __ enter(); // required for proper stackwalking of RuntimeStub frame 3062 3063 // pc and rbp, already pushed 3064 __ subptr(rsp, (framesize-2) * wordSize); // prolog 3065 3066 // Frame is now completed as far as size and linkage.
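// (frame_complete below records the code offset at which the frame is fully
//  set up; the runtime consults it to decide whether a thread stopped in this
//  stub already has a walkable frame.)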
3067 3068 int frame_complete = __ pc() - start; 3069 3070 // push java thread (becomes first argument of C function) 3071 __ movptr(Address(rsp, thread_off * wordSize), java_thread); 3072 if (arg1 != noreg) { 3073 __ movptr(Address(rsp, arg1_off * wordSize), arg1); 3074 } 3075 if (arg2 != noreg) { 3076 assert(arg1 != noreg, "missing reg arg"); 3077 __ movptr(Address(rsp, arg2_off * wordSize), arg2); 3078 } 3079 3080 // Set up last_Java_sp and last_Java_fp 3081 __ set_last_Java_frame(java_thread, rsp, rbp, NULL); 3082 3083 // Call runtime 3084 BLOCK_COMMENT("call runtime_entry"); 3085 __ call(RuntimeAddress(runtime_entry)); 3086 // Generate oop map 3087 OopMap* map = new OopMap(framesize, 0); 3088 oop_maps->add_gc_map(__ pc() - start, map); 3089 3090 // restore the thread (cannot use the pushed argument since arguments 3091 // may be overwritten by C code generated by an optimizing compiler); 3092 // however can use the register value directly if it is callee saved. 3093 __ get_thread(java_thread); 3094 3095 __ reset_last_Java_frame(java_thread, true); 3096 3097 __ leave(); // required for proper stackwalking of RuntimeStub frame 3098 3099 // check for pending exceptions 3100 #ifdef ASSERT 3101 Label L; 3102 __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 3103 __ jcc(Assembler::notEqual, L); 3104 __ should_not_reach_here(); 3105 __ bind(L); 3106 #endif /* ASSERT */ 3107 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 3108 3109 3110 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, framesize, oop_maps, false); 3111 return stub->entry_point(); 3112 } 3113 3114 3115 void create_control_words() { 3116 // Round to nearest, 53-bit mode, exceptions masked 3117 StubRoutines::_fpu_cntrl_wrd_std = 0x027F; 3118 // Round to zero, 53-bit mode, exceptions masked 3119 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F; 3120 // Round to nearest, 24-bit mode, exceptions masked 3121 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F; 3122 // Round to nearest, 64-bit mode, exceptions masked 3123 StubRoutines::_fpu_cntrl_wrd_64 = 0x037F; 3124 // Round to nearest, all exceptions masked (MXCSR default) 3125 StubRoutines::_mxcsr_std = 0x1F80; 3126 // Note: the following two constants are 80-bit values; 3127 // layout is critical for correct loading by the FPU. 3128 // Bias for strict fp multiply/divide 3129 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000 3130 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000; 3131 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff; 3132 // Un-Bias for strict fp multiply/divide 3133 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000 3134 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000; 3135 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff; 3136 } 3137 3138 //--------------------------------------------------------------------------- 3139 // Initialization 3140 3141 void generate_initial() { 3142 // Generates all stubs and initializes the entry points 3143 3144 //------------------------------------------------------------------------------------------------------------------------ 3145 // entry points that exist in all platforms 3146 // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than 3147 // the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
StubRoutines::_forward_exception_entry = generate_forward_exception(); 3149 3150 StubRoutines::_call_stub_entry = 3151 generate_call_stub(StubRoutines::_call_stub_return_address); 3152 // is referenced by megamorphic call 3153 StubRoutines::_catch_exception_entry = generate_catch_exception(); 3154 3155 // These are currently used by Solaris/Intel 3156 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); 3157 3158 StubRoutines::_handler_for_unsafe_access_entry = 3159 generate_handler_for_unsafe_access(); 3160 3161 // platform dependent 3162 create_control_words(); 3163 3164 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); 3165 StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd(); 3166 StubRoutines::_d2i_wrapper = generate_d2i_wrapper(T_INT, 3167 CAST_FROM_FN_PTR(address, SharedRuntime::d2i)); 3168 StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG, 3169 CAST_FROM_FN_PTR(address, SharedRuntime::d2l)); 3170 3171 // Build this early so it's available for the interpreter 3172 StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); 3173 3174 if (UseCRC32Intrinsics) { 3175 // set the table address before generating the stubs that use it 3176 StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table; 3177 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32(); 3178 } 3179 } 3180 3181 3182 void generate_all() { 3183 // Generates all stubs and initializes the entry points 3184 3185 // These entry points require SharedInfo::stack0 to be set up in non-core builds 3186 // and need to be relocatable, so they each fabricate a RuntimeStub internally. 3187 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError)); 3188 StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError)); 3189 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call)); 3190 3191 //------------------------------------------------------------------------------------------------------------------------ 3192 // entry points that are platform specific 3193 3194 // support for verify_oop (must happen after universe_init) 3195 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 3196 3197 // arraycopy stubs used by compilers 3198 generate_arraycopy_stubs(); 3199 3200 generate_math_stubs(); 3201 3202 // don't bother generating these AES intrinsic stubs unless the global flag is set 3203 if (UseAESIntrinsics) { 3204 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // might be needed by the others 3205 3206 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); 3207 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 3208 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); 3209 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt(); 3210 } 3211 3212 // Generate GHASH intrinsics code 3213 if (UseGHASHIntrinsics) { 3214 StubRoutines::x86::_ghash_long_swap_mask_addr =
generate_ghash_long_swap_mask(); 3215 StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask(); 3216 StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks(); 3217 } 3218 3219 // Safefetch stubs. 3220 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, 3221 &StubRoutines::_safefetch32_fault_pc, 3222 &StubRoutines::_safefetch32_continuation_pc); 3223 StubRoutines::_safefetchN_entry = StubRoutines::_safefetch32_entry; 3224 StubRoutines::_safefetchN_fault_pc = StubRoutines::_safefetch32_fault_pc; 3225 StubRoutines::_safefetchN_continuation_pc = StubRoutines::_safefetch32_continuation_pc; 3226 } 3227 3228 3229 public: 3230 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 3231 if (all) { 3232 generate_all(); 3233 } else { 3234 generate_initial(); 3235 } 3236 } 3237 }; // end class declaration 3238 3239 3240 void StubGenerator_generate(CodeBuffer* code, bool all) { 3241 StubGenerator g(code, all); 3242 }