1 /* 2 * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2016, 2024 SAP SE. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "registerSaver_s390.hpp" 29 #include "gc/shared/barrierSet.hpp" 30 #include "gc/shared/barrierSetAssembler.hpp" 31 #include "gc/shared/barrierSetNMethod.hpp" 32 #include "interpreter/interpreter.hpp" 33 #include "interpreter/interp_masm.hpp" 34 #include "memory/universe.hpp" 35 #include "nativeInst_s390.hpp" 36 #include "oops/instanceOop.hpp" 37 #include "oops/objArrayKlass.hpp" 38 #include "oops/oop.inline.hpp" 39 #include "prims/methodHandles.hpp" 40 #include "prims/upcallLinker.hpp" 41 #include "runtime/frame.inline.hpp" 42 #include "runtime/handles.inline.hpp" 43 #include "runtime/javaThread.hpp" 44 #include "runtime/sharedRuntime.hpp" 45 #include "runtime/stubCodeGenerator.hpp" 46 #include "runtime/stubRoutines.hpp" 47 #include "utilities/formatBuffer.hpp" 48 #include "utilities/macros.hpp" 49 #include "utilities/powerOfTwo.hpp" 50 51 // Declaration and definition of StubGenerator (no .hpp file). 52 // For a more detailed description of the stub routine structure 53 // see the comment in stubRoutines.hpp. 54 55 #ifdef PRODUCT 56 #define __ _masm-> 57 #else 58 #define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)-> 59 #endif 60 61 #define BLOCK_COMMENT(str) if (PrintAssembly || PrintStubCode) __ block_comment(str) 62 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 63 64 65 // These static, partially const, variables are for the AES intrinsics. 66 // They are declared/initialized here to make them available across function bodies. 67 68 static const int AES_parmBlk_align = 32; // octoword alignment. 69 static const int AES_stackSpace_incr = AES_parmBlk_align; // add'l stack space is allocated in such increments. 70 // Must be multiple of AES_parmBlk_align. 71 72 static int AES_ctrVal_len = 0; // ctr init value len (in bytes), expected: length of dataBlk (16) 73 static int AES_ctrVec_len = 0; // # of ctr vector elements. That many block can be ciphered with one instruction execution 74 static int AES_ctrArea_len = 0; // reserved stack space (in bytes) for ctr (= ctrVal_len * ctrVec_len) 75 76 static int AES_parmBlk_addspace = 0; // Must be multiple of AES_parmblk_align. 77 // Will be set by stub generator to stub specific value. 78 static int AES_dataBlk_space = 0; // Must be multiple of AES_parmblk_align. 
79 // Will be set by stub generator to stub specific value. 80 static int AES_dataBlk_offset = 0; // offset of the local src and dst dataBlk buffers 81 // Will be set by stub generator to stub specific value. 82 83 // These offsets are relative to the parameter block address (Register parmBlk = Z_R1) 84 static const int keylen_offset = -1; 85 static const int fCode_offset = -2; 86 static const int ctrVal_len_offset = -4; 87 static const int msglen_offset = -8; 88 static const int unextSP_offset = -16; 89 static const int rem_msgblk_offset = -20; 90 static const int argsave_offset = -2*AES_parmBlk_align; 91 static const int regsave_offset = -4*AES_parmBlk_align; // save space for work regs (Z_R10..13) 92 static const int msglen_red_offset = regsave_offset + AES_parmBlk_align; // reduced len after preLoop; 93 static const int counter_offset = msglen_red_offset+8; // current counter vector position. 94 static const int localSpill_offset = argsave_offset + 24; // arg2..arg4 are saved 95 96 97 // ----------------------------------------------------------------------- 98 // Stub Code definitions 99 100 class StubGenerator: public StubCodeGenerator { 101 private: 102 103 //---------------------------------------------------------------------- 104 // Call stubs are used to call Java from C. 105 106 // 107 // Arguments: 108 // 109 // R2 - call wrapper address : address 110 // R3 - result : intptr_t* 111 // R4 - result type : BasicType 112 // R5 - method : method 113 // R6 - frame mgr entry point : address 114 // [SP+160] - parameter block : intptr_t* 115 // [SP+172] - parameter count in words : int 116 // [SP+176] - thread : Thread* 117 // 118 address generate_call_stub(address& return_address) { 119 // Set up a new C frame, copy Java arguments, call frame manager 120 // or native_entry, and process result. 121 122 StubCodeMark mark(this, "StubRoutines", "call_stub"); 123 address start = __ pc(); 124 125 Register r_arg_call_wrapper_addr = Z_ARG1; 126 Register r_arg_result_addr = Z_ARG2; 127 Register r_arg_result_type = Z_ARG3; 128 Register r_arg_method = Z_ARG4; 129 Register r_arg_entry = Z_ARG5; 130 131 // offsets to fp 132 #define d_arg_thread 176 133 #define d_arg_argument_addr 160 134 #define d_arg_argument_count 168+4 135 136 Register r_entryframe_fp = Z_tmp_1; 137 Register r_top_of_arguments_addr = Z_ARG4; 138 Register r_new_arg_entry = Z_R14; 139 140 // macros for frame offsets 141 #define call_wrapper_address_offset \ 142 _z_entry_frame_locals_neg(call_wrapper_address) 143 #define result_address_offset \ 144 _z_entry_frame_locals_neg(result_address) 145 #define result_type_offset \ 146 _z_entry_frame_locals_neg(result_type) 147 #define arguments_tos_address_offset \ 148 _z_entry_frame_locals_neg(arguments_tos_address) 149 150 { 151 // 152 // STACK on entry to call_stub: 153 // 154 // F1 [C_FRAME] 155 // ... 156 // 157 158 Register r_argument_addr = Z_tmp_3; 159 Register r_argumentcopy_addr = Z_tmp_4; 160 Register r_argument_size_in_bytes = Z_ARG5; 161 Register r_frame_size = Z_R1; 162 163 Label arguments_copied; 164 165 // Save non-volatile registers to ABI of caller frame. 
166 BLOCK_COMMENT("save registers, push frame {"); 167 __ z_stmg(Z_R6, Z_R14, 16, Z_SP); 168 __ z_std(Z_F8, 96, Z_SP); 169 __ z_std(Z_F9, 104, Z_SP); 170 __ z_std(Z_F10, 112, Z_SP); 171 __ z_std(Z_F11, 120, Z_SP); 172 __ z_std(Z_F12, 128, Z_SP); 173 __ z_std(Z_F13, 136, Z_SP); 174 __ z_std(Z_F14, 144, Z_SP); 175 __ z_std(Z_F15, 152, Z_SP); 176 177 // 178 // Push ENTRY_FRAME including arguments: 179 // 180 // F0 [TOP_IJAVA_FRAME_ABI] 181 // [outgoing Java arguments] 182 // [ENTRY_FRAME_LOCALS] 183 // F1 [C_FRAME] 184 // ... 185 // 186 187 // Calculate new frame size and push frame. 188 #define abi_plus_locals_size \ 189 (frame::z_top_ijava_frame_abi_size + frame::z_entry_frame_locals_size) 190 if (abi_plus_locals_size % BytesPerWord == 0) { 191 // Preload constant part of frame size. 192 __ load_const_optimized(r_frame_size, -abi_plus_locals_size/BytesPerWord); 193 // Keep copy of our frame pointer (caller's SP). 194 __ z_lgr(r_entryframe_fp, Z_SP); 195 // Add space required by arguments to frame size. 196 __ z_slgf(r_frame_size, d_arg_argument_count, Z_R0, Z_SP); 197 // Move Z_ARG5 early, it will be used as a local. 198 __ z_lgr(r_new_arg_entry, r_arg_entry); 199 // Convert frame size from words to bytes. 200 __ z_sllg(r_frame_size, r_frame_size, LogBytesPerWord); 201 __ push_frame(r_frame_size, r_entryframe_fp, 202 false/*don't copy SP*/, true /*frame size sign inverted*/); 203 } else { 204 guarantee(false, "frame sizes should be multiples of word size (BytesPerWord)"); 205 } 206 BLOCK_COMMENT("} save, push"); 207 208 // Load argument registers for call. 209 BLOCK_COMMENT("prepare/copy arguments {"); 210 __ z_lgr(Z_method, r_arg_method); 211 __ z_lg(Z_thread, d_arg_thread, r_entryframe_fp); 212 213 // Calculate top_of_arguments_addr which will be tos (not prepushed) later. 214 // Wimply use SP + frame::top_ijava_frame_size. 215 __ add2reg(r_top_of_arguments_addr, 216 frame::z_top_ijava_frame_abi_size - BytesPerWord, Z_SP); 217 218 // Initialize call_stub locals (step 1). 219 if ((call_wrapper_address_offset + BytesPerWord == result_address_offset) && 220 (result_address_offset + BytesPerWord == result_type_offset) && 221 (result_type_offset + BytesPerWord == arguments_tos_address_offset)) { 222 223 __ z_stmg(r_arg_call_wrapper_addr, r_top_of_arguments_addr, 224 call_wrapper_address_offset, r_entryframe_fp); 225 } else { 226 __ z_stg(r_arg_call_wrapper_addr, 227 call_wrapper_address_offset, r_entryframe_fp); 228 __ z_stg(r_arg_result_addr, 229 result_address_offset, r_entryframe_fp); 230 __ z_stg(r_arg_result_type, 231 result_type_offset, r_entryframe_fp); 232 __ z_stg(r_top_of_arguments_addr, 233 arguments_tos_address_offset, r_entryframe_fp); 234 } 235 236 // Copy Java arguments. 237 238 // Any arguments to copy? 239 __ load_and_test_int2long(Z_R1, Address(r_entryframe_fp, d_arg_argument_count)); 240 __ z_bre(arguments_copied); 241 242 // Prepare loop and copy arguments in reverse order. 243 { 244 // Calculate argument size in bytes. 245 __ z_sllg(r_argument_size_in_bytes, Z_R1, LogBytesPerWord); 246 247 // Get addr of first incoming Java argument. 248 __ z_lg(r_argument_addr, d_arg_argument_addr, r_entryframe_fp); 249 250 // Let r_argumentcopy_addr point to last outgoing Java argument. 251 __ add2reg(r_argumentcopy_addr, BytesPerWord, r_top_of_arguments_addr); // = Z_SP+160 effectively. 252 253 // Let r_argument_addr point to last incoming Java argument. 
254 __ add2reg_with_index(r_argument_addr, -BytesPerWord, 255 r_argument_size_in_bytes, r_argument_addr); 256 257 // Now loop while Z_R1 > 0 and copy arguments. 258 { 259 Label next_argument; 260 __ bind(next_argument); 261 // Mem-mem move. 262 __ z_mvc(0, BytesPerWord-1, r_argumentcopy_addr, 0, r_argument_addr); 263 __ add2reg(r_argument_addr, -BytesPerWord); 264 __ add2reg(r_argumentcopy_addr, BytesPerWord); 265 __ z_brct(Z_R1, next_argument); 266 } 267 } // End of argument copy loop. 268 269 __ bind(arguments_copied); 270 } 271 BLOCK_COMMENT("} arguments"); 272 273 BLOCK_COMMENT("call {"); 274 { 275 // Call frame manager or native entry. 276 277 // 278 // Register state on entry to frame manager / native entry: 279 // 280 // Z_ARG1 = r_top_of_arguments_addr - intptr_t *sender tos (prepushed) 281 // Lesp = (SP) + copied_arguments_offset - 8 282 // Z_method - method 283 // Z_thread - JavaThread* 284 // 285 286 // Here, the usual SP is the initial_caller_sp. 287 __ z_lgr(Z_R10, Z_SP); 288 289 // Z_esp points to the slot below the last argument. 290 __ z_lgr(Z_esp, r_top_of_arguments_addr); 291 292 // 293 // Stack on entry to frame manager / native entry: 294 // 295 // F0 [TOP_IJAVA_FRAME_ABI] 296 // [outgoing Java arguments] 297 // [ENTRY_FRAME_LOCALS] 298 // F1 [C_FRAME] 299 // ... 300 // 301 302 // Do a light-weight C-call here, r_new_arg_entry holds the address 303 // of the interpreter entry point (frame manager or native entry) 304 // and save runtime-value of return_pc in return_address 305 // (call by reference argument). 306 return_address = __ call_stub(r_new_arg_entry); 307 } 308 BLOCK_COMMENT("} call"); 309 310 { 311 BLOCK_COMMENT("restore registers {"); 312 // Returned from frame manager or native entry. 313 // Now pop frame, process result, and return to caller. 314 315 // 316 // Stack on exit from frame manager / native entry: 317 // 318 // F0 [ABI] 319 // ... 320 // [ENTRY_FRAME_LOCALS] 321 // F1 [C_FRAME] 322 // ... 323 // 324 // Just pop the topmost frame ... 325 // 326 327 // Restore frame pointer. 328 __ z_lg(r_entryframe_fp, _z_abi(callers_sp), Z_SP); 329 // Pop frame. Done here to minimize stalls. 330 __ pop_frame(); 331 332 // Reload some volatile registers which we've spilled before the call 333 // to frame manager / native entry. 334 // Access all locals via frame pointer, because we know nothing about 335 // the topmost frame's size. 336 __ z_lg(r_arg_result_addr, result_address_offset, r_entryframe_fp); 337 __ z_lg(r_arg_result_type, result_type_offset, r_entryframe_fp); 338 339 // Restore non-volatiles. 340 __ z_lmg(Z_R6, Z_R14, 16, Z_SP); 341 __ z_ld(Z_F8, 96, Z_SP); 342 __ z_ld(Z_F9, 104, Z_SP); 343 __ z_ld(Z_F10, 112, Z_SP); 344 __ z_ld(Z_F11, 120, Z_SP); 345 __ z_ld(Z_F12, 128, Z_SP); 346 __ z_ld(Z_F13, 136, Z_SP); 347 __ z_ld(Z_F14, 144, Z_SP); 348 __ z_ld(Z_F15, 152, Z_SP); 349 BLOCK_COMMENT("} restore"); 350 351 // 352 // Stack on exit from call_stub: 353 // 354 // 0 [C_FRAME] 355 // ... 356 // 357 // No call_stub frames left. 358 // 359 360 // All non-volatiles have been restored at this point!! 361 362 //------------------------------------------------------------------------ 363 // The following code makes some assumptions on the T_<type> enum values. 364 // The enum is defined in globalDefinitions.hpp. 365 // The validity of the assumptions is tested as far as possible. 
366 // The assigned values should not be shuffled 367 // T_BOOLEAN==4 - lowest used enum value 368 // T_NARROWOOP==16 - largest used enum value 369 //------------------------------------------------------------------------ 370 BLOCK_COMMENT("process result {"); 371 Label firstHandler; 372 int handlerLen= 8; 373 #ifdef ASSERT 374 char assertMsg[] = "check BasicType definition in globalDefinitions.hpp"; 375 __ z_chi(r_arg_result_type, T_BOOLEAN); 376 __ asm_assert(Assembler::bcondNotLow, assertMsg, 0x0234); 377 __ z_chi(r_arg_result_type, T_NARROWOOP); 378 __ asm_assert(Assembler::bcondNotHigh, assertMsg, 0x0235); 379 #endif 380 __ add2reg(r_arg_result_type, -T_BOOLEAN); // Remove offset. 381 __ z_larl(Z_R1, firstHandler); // location of first handler 382 __ z_sllg(r_arg_result_type, r_arg_result_type, 3); // Each handler is 8 bytes long. 383 __ z_bc(MacroAssembler::bcondAlways, 0, r_arg_result_type, Z_R1); 384 385 __ align(handlerLen); 386 __ bind(firstHandler); 387 // T_BOOLEAN: 388 guarantee(T_BOOLEAN == 4, "check BasicType definition in globalDefinitions.hpp"); 389 __ z_st(Z_RET, 0, r_arg_result_addr); 390 __ z_br(Z_R14); // Return to caller. 391 __ align(handlerLen); 392 // T_CHAR: 393 guarantee(T_CHAR == T_BOOLEAN+1, "check BasicType definition in globalDefinitions.hpp"); 394 __ z_st(Z_RET, 0, r_arg_result_addr); 395 __ z_br(Z_R14); // Return to caller. 396 __ align(handlerLen); 397 // T_FLOAT: 398 guarantee(T_FLOAT == T_CHAR+1, "check BasicType definition in globalDefinitions.hpp"); 399 __ z_ste(Z_FRET, 0, r_arg_result_addr); 400 __ z_br(Z_R14); // Return to caller. 401 __ align(handlerLen); 402 // T_DOUBLE: 403 guarantee(T_DOUBLE == T_FLOAT+1, "check BasicType definition in globalDefinitions.hpp"); 404 __ z_std(Z_FRET, 0, r_arg_result_addr); 405 __ z_br(Z_R14); // Return to caller. 406 __ align(handlerLen); 407 // T_BYTE: 408 guarantee(T_BYTE == T_DOUBLE+1, "check BasicType definition in globalDefinitions.hpp"); 409 __ z_st(Z_RET, 0, r_arg_result_addr); 410 __ z_br(Z_R14); // Return to caller. 411 __ align(handlerLen); 412 // T_SHORT: 413 guarantee(T_SHORT == T_BYTE+1, "check BasicType definition in globalDefinitions.hpp"); 414 __ z_st(Z_RET, 0, r_arg_result_addr); 415 __ z_br(Z_R14); // Return to caller. 416 __ align(handlerLen); 417 // T_INT: 418 guarantee(T_INT == T_SHORT+1, "check BasicType definition in globalDefinitions.hpp"); 419 __ z_st(Z_RET, 0, r_arg_result_addr); 420 __ z_br(Z_R14); // Return to caller. 421 __ align(handlerLen); 422 // T_LONG: 423 guarantee(T_LONG == T_INT+1, "check BasicType definition in globalDefinitions.hpp"); 424 __ z_stg(Z_RET, 0, r_arg_result_addr); 425 __ z_br(Z_R14); // Return to caller. 426 __ align(handlerLen); 427 // T_OBJECT: 428 guarantee(T_OBJECT == T_LONG+1, "check BasicType definition in globalDefinitions.hpp"); 429 __ z_stg(Z_RET, 0, r_arg_result_addr); 430 __ z_br(Z_R14); // Return to caller. 431 __ align(handlerLen); 432 // T_ARRAY: 433 guarantee(T_ARRAY == T_OBJECT+1, "check BasicType definition in globalDefinitions.hpp"); 434 __ z_stg(Z_RET, 0, r_arg_result_addr); 435 __ z_br(Z_R14); // Return to caller. 436 __ align(handlerLen); 437 // T_VOID: 438 guarantee(T_VOID == T_ARRAY+1, "check BasicType definition in globalDefinitions.hpp"); 439 __ z_stg(Z_RET, 0, r_arg_result_addr); 440 __ z_br(Z_R14); // Return to caller. 441 __ align(handlerLen); 442 // T_ADDRESS: 443 guarantee(T_ADDRESS == T_VOID+1, "check BasicType definition in globalDefinitions.hpp"); 444 __ z_stg(Z_RET, 0, r_arg_result_addr); 445 __ z_br(Z_R14); // Return to caller. 
446 __ align(handlerLen); 447 // T_NARROWOOP: 448 guarantee(T_NARROWOOP == T_ADDRESS+1, "check BasicType definition in globalDefinitions.hpp"); 449 __ z_st(Z_RET, 0, r_arg_result_addr); 450 __ z_br(Z_R14); // Return to caller. 451 __ align(handlerLen); 452 BLOCK_COMMENT("} process result"); 453 } 454 return start; 455 } 456 457 // Return point for a Java call if there's an exception thrown in 458 // Java code. The exception is caught and transformed into a 459 // pending exception stored in JavaThread that can be tested from 460 // within the VM. 461 address generate_catch_exception() { 462 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 463 464 address start = __ pc(); 465 466 // 467 // Registers alive 468 // 469 // Z_thread 470 // Z_ARG1 - address of pending exception 471 // Z_ARG2 - return address in call stub 472 // 473 474 const Register exception_file = Z_R0; 475 const Register exception_line = Z_R1; 476 477 __ load_const_optimized(exception_file, (void*)__FILE__); 478 __ load_const_optimized(exception_line, (void*)__LINE__); 479 480 __ z_stg(Z_ARG1, thread_(pending_exception)); 481 // Store into `char *'. 482 __ z_stg(exception_file, thread_(exception_file)); 483 // Store into `int'. 484 __ z_st(exception_line, thread_(exception_line)); 485 486 // Complete return to VM. 487 assert(StubRoutines::_call_stub_return_address != nullptr, "must have been generated before"); 488 489 // Continue in call stub. 490 __ z_br(Z_ARG2); 491 492 return start; 493 } 494 495 // Continuation point for runtime calls returning with a pending 496 // exception. The pending exception check happened in the runtime 497 // or native call stub. The pending exception in Thread is 498 // converted into a Java-level exception. 499 // 500 // Read: 501 // Z_R14: pc the runtime library callee wants to return to. 502 // Since the exception occurred in the callee, the return pc 503 // from the point of view of Java is the exception pc. 504 // 505 // Invalidate: 506 // Volatile registers (except below). 507 // 508 // Update: 509 // Z_ARG1: exception 510 // (Z_R14 is unchanged and is live out). 511 // 512 address generate_forward_exception() { 513 StubCodeMark mark(this, "StubRoutines", "forward_exception"); 514 address start = __ pc(); 515 516 #define pending_exception_offset in_bytes(Thread::pending_exception_offset()) 517 #ifdef ASSERT 518 // Get pending exception oop. 519 __ z_lg(Z_ARG1, pending_exception_offset, Z_thread); 520 521 // Make sure that this code is only executed if there is a pending exception. 522 { 523 Label L; 524 __ z_ltgr(Z_ARG1, Z_ARG1); 525 __ z_brne(L); 526 __ stop("StubRoutines::forward exception: no pending exception (1)"); 527 __ bind(L); 528 } 529 530 __ verify_oop(Z_ARG1, "StubRoutines::forward exception: not an oop"); 531 #endif 532 533 __ z_lgr(Z_ARG2, Z_R14); // Copy exception pc into Z_ARG2. 534 __ save_return_pc(); 535 __ push_frame_abi160(0); 536 // Find exception handler. 537 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 538 Z_thread, 539 Z_ARG2); 540 // Copy handler's address. 541 __ z_lgr(Z_R1, Z_RET); 542 __ pop_frame(); 543 __ restore_return_pc(); 544 545 // Set up the arguments for the exception handler: 546 // - Z_ARG1: exception oop 547 // - Z_ARG2: exception pc 548 549 // Load pending exception oop. 
550 __ z_lg(Z_ARG1, pending_exception_offset, Z_thread); 551 552 // The exception pc is the return address in the caller, 553 // must load it into Z_ARG2 554 __ z_lgr(Z_ARG2, Z_R14); 555 556 #ifdef ASSERT 557 // Make sure exception is set. 558 { Label L; 559 __ z_ltgr(Z_ARG1, Z_ARG1); 560 __ z_brne(L); 561 __ stop("StubRoutines::forward exception: no pending exception (2)"); 562 __ bind(L); 563 } 564 #endif 565 // Clear the pending exception. 566 __ clear_mem(Address(Z_thread, pending_exception_offset), sizeof(void *)); 567 // Jump to exception handler 568 __ z_br(Z_R1 /*handler address*/); 569 570 return start; 571 572 #undef pending_exception_offset 573 } 574 575 #undef __ 576 #ifdef PRODUCT 577 #define __ _masm-> 578 #else 579 #define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)-> 580 #endif 581 582 // Support for uint StubRoutine::zarch::partial_subtype_check(Klass 583 // sub, Klass super); 584 // 585 // Arguments: 586 // ret : Z_RET, returned 587 // sub : Z_ARG2, argument, not changed 588 // super: Z_ARG3, argument, not changed 589 // 590 // raddr: Z_R14, blown by call 591 // 592 address generate_partial_subtype_check() { 593 StubCodeMark mark(this, "StubRoutines", "partial_subtype_check"); 594 Label miss; 595 596 address start = __ pc(); 597 598 const Register Rsubklass = Z_ARG2; // subklass 599 const Register Rsuperklass = Z_ARG3; // superklass 600 601 // No args, but tmp registers that are killed. 602 const Register Rlength = Z_ARG4; // cache array length 603 const Register Rarray_ptr = Z_ARG5; // Current value from cache array. 604 605 if (UseCompressedOops) { 606 assert(Universe::heap() != nullptr, "java heap must be initialized to generate partial_subtype_check stub"); 607 } 608 609 // Always take the slow path. 610 __ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, 611 Rarray_ptr, Rlength, nullptr, &miss); 612 613 // Match falls through here. 614 __ clear_reg(Z_RET); // Zero indicates a match. Set EQ flag in CC. 615 __ z_br(Z_R14); 616 617 __ BIND(miss); 618 __ load_const_optimized(Z_RET, 1); // One indicates a miss. 619 __ z_ltgr(Z_RET, Z_RET); // Set NE flag in CR. 620 __ z_br(Z_R14); 621 622 return start; 623 } 624 625 address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { 626 StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table"); 627 628 const Register 629 r_super_klass = Z_ARG1, 630 r_sub_klass = Z_ARG2, 631 r_array_index = Z_ARG3, 632 r_array_length = Z_ARG4, 633 r_array_base = Z_ARG5, 634 r_bitmap = Z_R10, 635 r_result = Z_R11; 636 address start = __ pc(); 637 638 __ lookup_secondary_supers_table(r_sub_klass, r_super_klass, 639 r_array_base, r_array_length, r_array_index, 640 r_bitmap, r_result, super_klass_index); 641 642 __ z_br(Z_R14); 643 644 return start; 645 } 646 647 // Slow path implementation for UseSecondarySupersTable. 
648 address generate_lookup_secondary_supers_table_slow_path_stub() { 649 StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table_slow_path"); 650 651 address start = __ pc(); 652 653 const Register 654 r_super_klass = Z_ARG1, 655 r_array_base = Z_ARG5, 656 r_temp1 = Z_ARG4, 657 r_array_index = Z_ARG3, 658 r_bitmap = Z_R10, 659 r_result = Z_R11; 660 661 __ lookup_secondary_supers_table_slow_path(r_super_klass, r_array_base, 662 r_array_index, r_bitmap, r_result, r_temp1); 663 664 __ z_br(Z_R14); 665 666 return start; 667 } 668 669 #if !defined(PRODUCT) 670 // Wrapper which calls oopDesc::is_oop_or_null() 671 // Only called by MacroAssembler::verify_oop 672 static void verify_oop_helper(const char* message, oopDesc* o) { 673 if (!oopDesc::is_oop_or_null(o)) { 674 fatal("%s. oop: " PTR_FORMAT, message, p2i(o)); 675 } 676 ++ StubRoutines::_verify_oop_count; 677 } 678 #endif 679 680 // Return address of code to be called from code generated by 681 // MacroAssembler::verify_oop. 682 // 683 // Don't generate, rather use C++ code. 684 address generate_verify_oop_subroutine() { 685 // Don't generate a StubCodeMark, because no code is generated! 686 // Generating the mark triggers notifying the oprofile jvmti agent 687 // about the dynamic code generation, but the stub without 688 // code (code_size == 0) confuses opjitconv 689 // StubCodeMark mark(this, "StubRoutines", "verify_oop_stub"); 690 691 address start = 0; 692 693 #if !defined(PRODUCT) 694 start = CAST_FROM_FN_PTR(address, verify_oop_helper); 695 #endif 696 697 return start; 698 } 699 700 // This is to test that the count register contains a positive int value. 701 // Required because C2 does not respect int to long conversion for stub calls. 702 void assert_positive_int(Register count) { 703 #ifdef ASSERT 704 __ z_srag(Z_R0, count, 31); // Just leave the sign (must be zero) in Z_R0. 705 __ asm_assert(Assembler::bcondZero, "missing zero extend", 0xAFFE); 706 #endif 707 } 708 709 // Generate overlap test for array copy stubs. 710 // If no actual overlap is detected, control is transferred to the 711 // "normal" copy stub (entry address passed in disjoint_copy_target). 712 // Otherwise, execution continues with the code generated by the 713 // caller of array_overlap_test. 714 // 715 // Input: 716 // Z_ARG1 - from 717 // Z_ARG2 - to 718 // Z_ARG3 - element count 719 void array_overlap_test(address disjoint_copy_target, int log2_elem_size) { 720 __ MacroAssembler::compare_and_branch_optimized(Z_ARG2, Z_ARG1, Assembler::bcondNotHigh, 721 disjoint_copy_target, /*len64=*/true, /*has_sign=*/false); 722 723 Register index = Z_ARG3; 724 if (log2_elem_size > 0) { 725 __ z_sllg(Z_R1, Z_ARG3, log2_elem_size); // byte count 726 index = Z_R1; 727 } 728 __ add2reg_with_index(Z_R1, 0, index, Z_ARG1); // First byte after "from" range. 729 730 __ MacroAssembler::compare_and_branch_optimized(Z_R1, Z_ARG2, Assembler::bcondNotHigh, 731 disjoint_copy_target, /*len64=*/true, /*has_sign=*/false); 732 733 // Destructive overlap: let caller generate code for that. 734 } 735 736 // Generate stub for disjoint array copy. If "aligned" is true, the 737 // "from" and "to" addresses are assumed to be heapword aligned. 738 // 739 // Arguments for generated stub: 740 // from: Z_ARG1 741 // to: Z_ARG2 742 // count: Z_ARG3 treated as signed 743 void generate_disjoint_copy(bool aligned, int element_size, 744 bool branchToEnd, 745 bool restoreArgs) { 746 // This is the zarch specific stub generator for general array copy tasks. 
747 // It has the following prereqs and features: 748 // 749 // - No destructive overlap allowed (else unpredictable results). 750 // - Destructive overlap does not exist if the leftmost byte of the target 751 // does not coincide with any of the source bytes (except the leftmost). 752 // 753 // Register usage upon entry: 754 // Z_ARG1 == Z_R2 : address of source array 755 // Z_ARG2 == Z_R3 : address of target array 756 // Z_ARG3 == Z_R4 : length of operands (# of elements on entry) 757 // 758 // Register usage within the generator: 759 // - Z_R0 and Z_R1 are KILLed by the stub routine (target addr/len). 760 // Used as pair register operand in complex moves, scratch registers anyway. 761 // - Z_R5 is KILLed by the stub routine (source register pair addr/len) (even/odd reg). 762 // Same as R0/R1, but no scratch register. 763 // - Z_ARG1, Z_ARG2, Z_ARG3 are USEd but preserved by the stub routine, 764 // but they might get temporarily overwritten. 765 766 Register save_reg = Z_ARG4; // (= Z_R5), holds original target operand address for restore. 767 768 { 769 Register llen_reg = Z_R1; // Holds left operand len (odd reg). 770 Register laddr_reg = Z_R0; // Holds left operand addr (even reg), overlaps with data_reg. 771 Register rlen_reg = Z_R5; // Holds right operand len (odd reg), overlaps with save_reg. 772 Register raddr_reg = Z_R4; // Holds right operand addr (even reg), overlaps with len_reg. 773 774 Register data_reg = Z_R0; // Holds copied data chunk in alignment process and copy loop. 775 Register len_reg = Z_ARG3; // Holds operand len (#elements at entry, #bytes shortly after). 776 Register dst_reg = Z_ARG2; // Holds left (target) operand addr. 777 Register src_reg = Z_ARG1; // Holds right (source) operand addr. 778 779 Label doMVCLOOP, doMVCLOOPcount, doMVCLOOPiterate; 780 Label doMVCUnrolled; 781 NearLabel doMVC, doMVCgeneral, done; 782 Label MVC_template; 783 address pcMVCblock_b, pcMVCblock_e; 784 785 bool usedMVCLE = true; 786 bool usedMVCLOOP = true; 787 bool usedMVCUnrolled = false; 788 bool usedMVC = false; 789 bool usedMVCgeneral = false; 790 791 int stride; 792 Register stride_reg; 793 Register ix_reg; 794 795 assert((element_size<=256) && (256%element_size == 0), "element size must be <= 256, power of 2"); 796 unsigned int log2_size = exact_log2(element_size); 797 798 switch (element_size) { 799 case 1: BLOCK_COMMENT("ARRAYCOPY DISJOINT byte {"); break; 800 case 2: BLOCK_COMMENT("ARRAYCOPY DISJOINT short {"); break; 801 case 4: BLOCK_COMMENT("ARRAYCOPY DISJOINT int {"); break; 802 case 8: BLOCK_COMMENT("ARRAYCOPY DISJOINT long {"); break; 803 default: BLOCK_COMMENT("ARRAYCOPY DISJOINT {"); break; 804 } 805 806 assert_positive_int(len_reg); 807 808 BLOCK_COMMENT("preparation {"); 809 810 // No copying if len <= 0. 811 if (branchToEnd) { 812 __ compare64_and_branch(len_reg, (intptr_t) 0, Assembler::bcondNotHigh, done); 813 } else { 814 if (VM_Version::has_CompareBranch()) { 815 __ z_cgib(len_reg, 0, Assembler::bcondNotHigh, 0, Z_R14); 816 } else { 817 __ z_ltgr(len_reg, len_reg); 818 __ z_bcr(Assembler::bcondNotPositive, Z_R14); 819 } 820 } 821 822 // Prefetch just one cache line. Speculative opt for short arrays. 823 // Do not use Z_R1 in prefetch. Is undefined here. 824 if (VM_Version::has_Prefetch()) { 825 __ z_pfd(0x01, 0, Z_R0, src_reg); // Fetch access. 826 __ z_pfd(0x02, 0, Z_R0, dst_reg); // Store access. 827 } 828 829 BLOCK_COMMENT("} preparation"); 830 831 // Save args only if really needed. 832 // Keep len test local to branch. Is generated only once. 
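      // For orientation, a worked example of the mode selection below for an
      // int array (element_size = 4, log2_size = 2), derived from the compares:
      //   len <=  256/4 =   64 elements (<= 256 bytes) -> single executed MVC (doMVC)
      //   len <= 4096/4 = 1024 elements (<= 4 KB)      -> MVC loop, 256 bytes per iteration (doMVCLOOP)
      //   len >  1024 elements          (>  4 KB)      -> MVCLE (fall through)
      // For element_size == 8, the short case branches to doMVCUnrolled instead of doMVC.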
833 834 BLOCK_COMMENT("mode selection {"); 835 836 // Special handling for arrays with only a few elements. 837 // Nothing fancy: just an executed MVC. 838 if (log2_size > 0) { 839 __ z_sllg(Z_R1, len_reg, log2_size); // Remember #bytes in Z_R1. 840 } 841 if (element_size != 8) { 842 __ z_cghi(len_reg, 256/element_size); 843 __ z_brnh(doMVC); 844 usedMVC = true; 845 } 846 if (element_size == 8) { // Long and oop arrays are always aligned. 847 __ z_cghi(len_reg, 256/element_size); 848 __ z_brnh(doMVCUnrolled); 849 usedMVCUnrolled = true; 850 } 851 852 // Prefetch another cache line. We, for sure, have more than one line to copy. 853 if (VM_Version::has_Prefetch()) { 854 __ z_pfd(0x01, 256, Z_R0, src_reg); // Fetch access. 855 __ z_pfd(0x02, 256, Z_R0, dst_reg); // Store access. 856 } 857 858 if (restoreArgs) { 859 // Remember entry value of ARG2 to restore all arguments later from that knowledge. 860 __ z_lgr(save_reg, dst_reg); 861 } 862 863 __ z_cghi(len_reg, 4096/element_size); 864 if (log2_size == 0) { 865 __ z_lgr(Z_R1, len_reg); // Init Z_R1 with #bytes 866 } 867 __ z_brnh(doMVCLOOP); 868 869 // Fall through to MVCLE case. 870 871 BLOCK_COMMENT("} mode selection"); 872 873 // MVCLE: for long arrays 874 // DW aligned: Best performance for sizes > 4kBytes. 875 // unaligned: Least complex for sizes > 256 bytes. 876 if (usedMVCLE) { 877 BLOCK_COMMENT("mode MVCLE {"); 878 879 // Setup registers for mvcle. 880 //__ z_lgr(llen_reg, len_reg);// r1 <- r4 #bytes already in Z_R1, aka llen_reg. 881 __ z_lgr(laddr_reg, dst_reg); // r0 <- r3 882 __ z_lgr(raddr_reg, src_reg); // r4 <- r2 883 __ z_lgr(rlen_reg, llen_reg); // r5 <- r1 884 885 __ MacroAssembler::move_long_ext(laddr_reg, raddr_reg, 0xb0); // special: bypass cache 886 // __ MacroAssembler::move_long_ext(laddr_reg, raddr_reg, 0xb8); // special: Hold data in cache. 887 // __ MacroAssembler::move_long_ext(laddr_reg, raddr_reg, 0); 888 889 if (restoreArgs) { 890 // MVCLE updates the source (Z_R4,Z_R5) and target (Z_R0,Z_R1) register pairs. 891 // Dst_reg (Z_ARG2) and src_reg (Z_ARG1) are left untouched. No restore required. 892 // Len_reg (Z_ARG3) is destroyed and must be restored. 893 __ z_slgr(laddr_reg, dst_reg); // copied #bytes 894 if (log2_size > 0) { 895 __ z_srag(Z_ARG3, laddr_reg, log2_size); // Convert back to #elements. 896 } else { 897 __ z_lgr(Z_ARG3, laddr_reg); 898 } 899 } 900 if (branchToEnd) { 901 __ z_bru(done); 902 } else { 903 __ z_br(Z_R14); 904 } 905 BLOCK_COMMENT("} mode MVCLE"); 906 } 907 // No fallthru possible here. 908 909 // MVCUnrolled: for short, aligned arrays. 910 911 if (usedMVCUnrolled) { 912 BLOCK_COMMENT("mode MVC unrolled {"); 913 stride = 8; 914 915 // Generate unrolled MVC instructions. 916 for (int ii = 32; ii > 1; ii--) { 917 __ z_mvc(0, ii * stride-1, dst_reg, 0, src_reg); // ii*8 byte copy 918 if (branchToEnd) { 919 __ z_bru(done); 920 } else { 921 __ z_br(Z_R14); 922 } 923 } 924 925 pcMVCblock_b = __ pc(); 926 __ z_mvc(0, 1 * stride-1, dst_reg, 0, src_reg); // 8 byte copy 927 if (branchToEnd) { 928 __ z_bru(done); 929 } else { 930 __ z_br(Z_R14); 931 } 932 933 pcMVCblock_e = __ pc(); 934 Label MVC_ListEnd; 935 __ bind(MVC_ListEnd); 936 937 // This is an absolute fast path: 938 // - Array len in bytes must be not greater than 256. 939 // - Array len in bytes must be an integer mult of DW 940 // to save expensive handling of trailing bytes. 941 // - Argument restore is not done, 942 // i.e. previous code must not alter arguments (this code doesn't either). 
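      // Note on the computed branch generated below (illustrative): the unrolled
      // blocks above were emitted back to back, each MVCblocksize code bytes long,
      // copying 32*8, 31*8, ..., 1*8 data bytes respectively. With n = #DW to copy,
      // branching to (MVC_ListEnd - n*MVCblocksize) selects exactly the block that
      // performs an n*8-byte MVC (e.g. n = 3 lands three blocks before MVC_ListEnd,
      // which copies 24 bytes) and then returns or branches to "done".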
943 944 __ bind(doMVCUnrolled); 945 946 // Avoid mul, prefer shift where possible. 947 // Combine shift right (for #DW) with shift left (for block size). 948 // Set CC for zero test below (asm_assert). 949 // Note: #bytes comes in Z_R1, #DW in len_reg. 950 unsigned int MVCblocksize = pcMVCblock_e - pcMVCblock_b; 951 unsigned int logMVCblocksize = 0xffffffffU; // Pacify compiler ("used uninitialized" warning). 952 953 if (log2_size > 0) { // Len was scaled into Z_R1. 954 switch (MVCblocksize) { 955 956 case 8: logMVCblocksize = 3; 957 __ z_ltgr(Z_R0, Z_R1); // #bytes is index 958 break; // reasonable size, use shift 959 960 case 16: logMVCblocksize = 4; 961 __ z_slag(Z_R0, Z_R1, logMVCblocksize-log2_size); 962 break; // reasonable size, use shift 963 964 default: logMVCblocksize = 0; 965 __ z_ltgr(Z_R0, len_reg); // #DW for mul 966 break; // all other sizes: use mul 967 } 968 } else { 969 guarantee(log2_size, "doMVCUnrolled: only for DW entities"); 970 } 971 972 // This test (and branch) is redundant. Previous code makes sure that 973 // - element count > 0 974 // - element size == 8. 975 // Thus, len reg should never be zero here. We insert an asm_assert() here, 976 // just to double-check and to be on the safe side. 977 __ asm_assert(false, "zero len cannot occur", 99); 978 979 __ z_larl(Z_R1, MVC_ListEnd); // Get addr of last instr block. 980 // Avoid mul, prefer shift where possible. 981 if (logMVCblocksize == 0) { 982 __ z_mghi(Z_R0, MVCblocksize); 983 } 984 __ z_slgr(Z_R1, Z_R0); 985 __ z_br(Z_R1); 986 BLOCK_COMMENT("} mode MVC unrolled"); 987 } 988 // No fallthru possible here. 989 990 // MVC execute template 991 // Must always generate. Usage may be switched on below. 992 // There is no suitable place after here to put the template. 993 __ bind(MVC_template); 994 __ z_mvc(0,0,dst_reg,0,src_reg); // Instr template, never exec directly! 995 996 997 // MVC Loop: for medium-sized arrays 998 999 // Only for DW aligned arrays (src and dst). 1000 // #bytes to copy must be at least 256!!! 1001 // Non-aligned cases handled separately. 1002 stride = 256; 1003 stride_reg = Z_R1; // Holds #bytes when control arrives here. 1004 ix_reg = Z_ARG3; // Alias for len_reg. 1005 1006 1007 if (usedMVCLOOP) { 1008 BLOCK_COMMENT("mode MVC loop {"); 1009 __ bind(doMVCLOOP); 1010 1011 __ z_lcgr(ix_reg, Z_R1); // Ix runs from -(n-2)*stride to 1*stride (inclusive). 1012 __ z_llill(stride_reg, stride); 1013 __ add2reg(ix_reg, 2*stride); // Thus: increment ix by 2*stride. 1014 1015 __ bind(doMVCLOOPiterate); 1016 __ z_mvc(0, stride-1, dst_reg, 0, src_reg); 1017 __ add2reg(dst_reg, stride); 1018 __ add2reg(src_reg, stride); 1019 __ bind(doMVCLOOPcount); 1020 __ z_brxlg(ix_reg, stride_reg, doMVCLOOPiterate); 1021 1022 // Don 't use add2reg() here, since we must set the condition code! 1023 __ z_aghi(ix_reg, -2*stride); // Compensate incr from above: zero diff means "all copied". 1024 1025 if (restoreArgs) { 1026 __ z_lcgr(Z_R1, ix_reg); // Prepare ix_reg for copy loop, #bytes expected in Z_R1. 1027 __ z_brnz(doMVCgeneral); // We're not done yet, ix_reg is not zero. 1028 1029 // ARG1, ARG2, and ARG3 were altered by the code above, so restore them building on save_reg. 1030 __ z_slgr(dst_reg, save_reg); // copied #bytes 1031 __ z_slgr(src_reg, dst_reg); // = ARG1 (now restored) 1032 if (log2_size) { 1033 __ z_srag(Z_ARG3, dst_reg, log2_size); // Convert back to #elements to restore ARG3. 1034 } else { 1035 __ z_lgr(Z_ARG3, dst_reg); 1036 } 1037 __ z_lgr(Z_ARG2, save_reg); // ARG2 now restored. 
1038 1039 if (branchToEnd) { 1040 __ z_bru(done); 1041 } else { 1042 __ z_br(Z_R14); 1043 } 1044 1045 } else { 1046 if (branchToEnd) { 1047 __ z_brz(done); // CC set by aghi instr. 1048 } else { 1049 __ z_bcr(Assembler::bcondZero, Z_R14); // We're all done if zero. 1050 } 1051 1052 __ z_lcgr(Z_R1, ix_reg); // Prepare ix_reg for copy loop, #bytes expected in Z_R1. 1053 // __ z_bru(doMVCgeneral); // fallthru 1054 } 1055 usedMVCgeneral = true; 1056 BLOCK_COMMENT("} mode MVC loop"); 1057 } 1058 // Fallthru to doMVCgeneral 1059 1060 // MVCgeneral: for short, unaligned arrays, after other copy operations 1061 1062 // Somewhat expensive due to use of EX instruction, but simple. 1063 if (usedMVCgeneral) { 1064 BLOCK_COMMENT("mode MVC general {"); 1065 __ bind(doMVCgeneral); 1066 1067 __ add2reg(len_reg, -1, Z_R1); // Get #bytes-1 for EXECUTE. 1068 if (VM_Version::has_ExecuteExtensions()) { 1069 __ z_exrl(len_reg, MVC_template); // Execute MVC with variable length. 1070 } else { 1071 __ z_larl(Z_R1, MVC_template); // Get addr of instr template. 1072 __ z_ex(len_reg, 0, Z_R0, Z_R1); // Execute MVC with variable length. 1073 } // penalty: 9 ticks 1074 1075 if (restoreArgs) { 1076 // ARG1, ARG2, and ARG3 were altered by code executed before, so restore them building on save_reg 1077 __ z_slgr(dst_reg, save_reg); // Copied #bytes without the "doMVCgeneral" chunk 1078 __ z_slgr(src_reg, dst_reg); // = ARG1 (now restored), was not advanced for "doMVCgeneral" chunk 1079 __ add2reg_with_index(dst_reg, 1, len_reg, dst_reg); // Len of executed MVC was not accounted for, yet. 1080 if (log2_size) { 1081 __ z_srag(Z_ARG3, dst_reg, log2_size); // Convert back to #elements to restore ARG3 1082 } else { 1083 __ z_lgr(Z_ARG3, dst_reg); 1084 } 1085 __ z_lgr(Z_ARG2, save_reg); // ARG2 now restored. 1086 } 1087 1088 if (usedMVC) { 1089 if (branchToEnd) { 1090 __ z_bru(done); 1091 } else { 1092 __ z_br(Z_R14); 1093 } 1094 } else { 1095 if (!branchToEnd) __ z_br(Z_R14); 1096 } 1097 BLOCK_COMMENT("} mode MVC general"); 1098 } 1099 // Fallthru possible if following block not generated. 1100 1101 // MVC: for short, unaligned arrays 1102 1103 // Somewhat expensive due to use of EX instruction, but simple. penalty: 9 ticks. 1104 // Differs from doMVCgeneral in reconstruction of ARG2, ARG3, and ARG4. 1105 if (usedMVC) { 1106 BLOCK_COMMENT("mode MVC {"); 1107 __ bind(doMVC); 1108 1109 // get #bytes-1 for EXECUTE 1110 if (log2_size) { 1111 __ add2reg(Z_R1, -1); // Length was scaled into Z_R1. 1112 } else { 1113 __ add2reg(Z_R1, -1, len_reg); // Length was not scaled. 1114 } 1115 1116 if (VM_Version::has_ExecuteExtensions()) { 1117 __ z_exrl(Z_R1, MVC_template); // Execute MVC with variable length. 1118 } else { 1119 __ z_lgr(Z_R0, Z_R5); // Save ARG4, may be unnecessary. 1120 __ z_larl(Z_R5, MVC_template); // Get addr of instr template. 1121 __ z_ex(Z_R1, 0, Z_R0, Z_R5); // Execute MVC with variable length. 1122 __ z_lgr(Z_R5, Z_R0); // Restore ARG4, may be unnecessary. 1123 } 1124 1125 if (!branchToEnd) { 1126 __ z_br(Z_R14); 1127 } 1128 BLOCK_COMMENT("} mode MVC"); 1129 } 1130 1131 __ bind(done); 1132 1133 switch (element_size) { 1134 case 1: BLOCK_COMMENT("} ARRAYCOPY DISJOINT byte "); break; 1135 case 2: BLOCK_COMMENT("} ARRAYCOPY DISJOINT short"); break; 1136 case 4: BLOCK_COMMENT("} ARRAYCOPY DISJOINT int "); break; 1137 case 8: BLOCK_COMMENT("} ARRAYCOPY DISJOINT long "); break; 1138 default: BLOCK_COMMENT("} ARRAYCOPY DISJOINT "); break; 1139 } 1140 } 1141 } 1142 1143 // Generate stub for conjoint array copy. 
If "aligned" is true, the 1144 // "from" and "to" addresses are assumed to be heapword aligned. 1145 // 1146 // Arguments for generated stub: 1147 // from: Z_ARG1 1148 // to: Z_ARG2 1149 // count: Z_ARG3 treated as signed 1150 void generate_conjoint_copy(bool aligned, int element_size, bool branchToEnd) { 1151 1152 // This is the zarch specific stub generator for general array copy tasks. 1153 // It has the following prereqs and features: 1154 // 1155 // - Destructive overlap exists and is handled by reverse copy. 1156 // - Destructive overlap exists if the leftmost byte of the target 1157 // does coincide with any of the source bytes (except the leftmost). 1158 // - Z_R0 and Z_R1 are KILLed by the stub routine (data and stride) 1159 // - Z_ARG1 and Z_ARG2 are USEd but preserved by the stub routine. 1160 // - Z_ARG3 is USED but preserved by the stub routine. 1161 // - Z_ARG4 is used as index register and is thus KILLed. 1162 // 1163 { 1164 Register stride_reg = Z_R1; // Stride & compare value in loop (negative element_size). 1165 Register data_reg = Z_R0; // Holds value of currently processed element. 1166 Register ix_reg = Z_ARG4; // Holds byte index of currently processed element. 1167 Register len_reg = Z_ARG3; // Holds length (in #elements) of arrays. 1168 Register dst_reg = Z_ARG2; // Holds left operand addr. 1169 Register src_reg = Z_ARG1; // Holds right operand addr. 1170 1171 assert(256%element_size == 0, "Element size must be power of 2."); 1172 assert(element_size <= 8, "Can't handle more than DW units."); 1173 1174 switch (element_size) { 1175 case 1: BLOCK_COMMENT("ARRAYCOPY CONJOINT byte {"); break; 1176 case 2: BLOCK_COMMENT("ARRAYCOPY CONJOINT short {"); break; 1177 case 4: BLOCK_COMMENT("ARRAYCOPY CONJOINT int {"); break; 1178 case 8: BLOCK_COMMENT("ARRAYCOPY CONJOINT long {"); break; 1179 default: BLOCK_COMMENT("ARRAYCOPY CONJOINT {"); break; 1180 } 1181 1182 assert_positive_int(len_reg); 1183 1184 if (VM_Version::has_Prefetch()) { 1185 __ z_pfd(0x01, 0, Z_R0, src_reg); // Fetch access. 1186 __ z_pfd(0x02, 0, Z_R0, dst_reg); // Store access. 1187 } 1188 1189 unsigned int log2_size = exact_log2(element_size); 1190 if (log2_size) { 1191 __ z_sllg(ix_reg, len_reg, log2_size); 1192 } else { 1193 __ z_lgr(ix_reg, len_reg); 1194 } 1195 1196 // Optimize reverse copy loop. 1197 // Main loop copies DW units which may be unaligned. Unaligned access adds some penalty ticks. 1198 // Unaligned DW access (neither fetch nor store) is DW-atomic, but should be alignment-atomic. 1199 // Preceding the main loop, some bytes are copied to obtain a DW-multiple remaining length. 1200 1201 Label countLoop1; 1202 Label copyLoop1; 1203 Label skipBY; 1204 Label skipHW; 1205 int stride = -8; 1206 1207 __ load_const_optimized(stride_reg, stride); // Prepare for DW copy loop. 1208 1209 if (element_size == 8) // Nothing to do here. 1210 __ z_bru(countLoop1); 1211 else { // Do not generate dead code. 1212 __ z_tmll(ix_reg, 7); // Check the "odd" bits. 1213 __ z_bre(countLoop1); // There are none, very good! 1214 } 1215 1216 if (log2_size == 0) { // Handle leftover Byte. 1217 __ z_tmll(ix_reg, 1); 1218 __ z_bre(skipBY); 1219 __ z_lb(data_reg, -1, ix_reg, src_reg); 1220 __ z_stcy(data_reg, -1, ix_reg, dst_reg); 1221 __ add2reg(ix_reg, -1); // Decrement delayed to avoid AGI. 1222 __ bind(skipBY); 1223 // fallthru 1224 } 1225 if (log2_size <= 1) { // Handle leftover HW. 
1226 __ z_tmll(ix_reg, 2); 1227 __ z_bre(skipHW); 1228 __ z_lhy(data_reg, -2, ix_reg, src_reg); 1229 __ z_sthy(data_reg, -2, ix_reg, dst_reg); 1230 __ add2reg(ix_reg, -2); // Decrement delayed to avoid AGI. 1231 __ bind(skipHW); 1232 __ z_tmll(ix_reg, 4); 1233 __ z_bre(countLoop1); 1234 // fallthru 1235 } 1236 if (log2_size <= 2) { // There are just 4 bytes (left) that need to be copied. 1237 __ z_ly(data_reg, -4, ix_reg, src_reg); 1238 __ z_sty(data_reg, -4, ix_reg, dst_reg); 1239 __ add2reg(ix_reg, -4); // Decrement delayed to avoid AGI. 1240 __ z_bru(countLoop1); 1241 } 1242 1243 // Control can never get to here. Never! Never ever! 1244 __ z_illtrap(0x99); 1245 __ bind(copyLoop1); 1246 __ z_lg(data_reg, 0, ix_reg, src_reg); 1247 __ z_stg(data_reg, 0, ix_reg, dst_reg); 1248 __ bind(countLoop1); 1249 __ z_brxhg(ix_reg, stride_reg, copyLoop1); 1250 1251 if (!branchToEnd) 1252 __ z_br(Z_R14); 1253 1254 switch (element_size) { 1255 case 1: BLOCK_COMMENT("} ARRAYCOPY CONJOINT byte "); break; 1256 case 2: BLOCK_COMMENT("} ARRAYCOPY CONJOINT short"); break; 1257 case 4: BLOCK_COMMENT("} ARRAYCOPY CONJOINT int "); break; 1258 case 8: BLOCK_COMMENT("} ARRAYCOPY CONJOINT long "); break; 1259 default: BLOCK_COMMENT("} ARRAYCOPY CONJOINT "); break; 1260 } 1261 } 1262 } 1263 1264 // Generate stub for disjoint byte copy. If "aligned" is true, the 1265 // "from" and "to" addresses are assumed to be heapword aligned. 1266 address generate_disjoint_byte_copy(bool aligned, const char * name) { 1267 StubCodeMark mark(this, "StubRoutines", name); 1268 1269 // This is the zarch specific stub generator for byte array copy. 1270 // Refer to generate_disjoint_copy for a list of prereqs and features: 1271 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1272 generate_disjoint_copy(aligned, 1, false, false); 1273 return __ addr_at(start_off); 1274 } 1275 1276 1277 address generate_disjoint_short_copy(bool aligned, const char * name) { 1278 StubCodeMark mark(this, "StubRoutines", name); 1279 // This is the zarch specific stub generator for short array copy. 1280 // Refer to generate_disjoint_copy for a list of prereqs and features: 1281 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1282 generate_disjoint_copy(aligned, 2, false, false); 1283 return __ addr_at(start_off); 1284 } 1285 1286 1287 address generate_disjoint_int_copy(bool aligned, const char * name) { 1288 StubCodeMark mark(this, "StubRoutines", name); 1289 // This is the zarch specific stub generator for int array copy. 1290 // Refer to generate_disjoint_copy for a list of prereqs and features: 1291 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1292 generate_disjoint_copy(aligned, 4, false, false); 1293 return __ addr_at(start_off); 1294 } 1295 1296 1297 address generate_disjoint_long_copy(bool aligned, const char * name) { 1298 StubCodeMark mark(this, "StubRoutines", name); 1299 // This is the zarch specific stub generator for long array copy. 1300 // Refer to generate_disjoint_copy for a list of prereqs and features: 1301 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1302 generate_disjoint_copy(aligned, 8, false, false); 1303 return __ addr_at(start_off); 1304 } 1305 1306 1307 address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { 1308 StubCodeMark mark(this, "StubRoutines", name); 1309 // This is the zarch specific stub generator for oop array copy. 
1310 // Refer to generate_disjoint_copy for a list of prereqs and features. 1311 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1312 unsigned int size = UseCompressedOops ? 4 : 8; 1313 1314 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 1315 if (dest_uninitialized) { 1316 decorators |= IS_DEST_UNINITIALIZED; 1317 } 1318 if (aligned) { 1319 decorators |= ARRAYCOPY_ALIGNED; 1320 } 1321 1322 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 1323 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, Z_ARG1, Z_ARG2, Z_ARG3); 1324 1325 generate_disjoint_copy(aligned, size, true, true); 1326 1327 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, Z_ARG2, Z_ARG3, true); 1328 1329 return __ addr_at(start_off); 1330 } 1331 1332 1333 address generate_conjoint_byte_copy(bool aligned, const char * name) { 1334 StubCodeMark mark(this, "StubRoutines", name); 1335 // This is the zarch specific stub generator for overlapping byte array copy. 1336 // Refer to generate_conjoint_copy for a list of prereqs and features: 1337 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1338 address nooverlap_target = aligned ? StubRoutines::arrayof_jbyte_disjoint_arraycopy() 1339 : StubRoutines::jbyte_disjoint_arraycopy(); 1340 1341 array_overlap_test(nooverlap_target, 0); // Branch away to nooverlap_target if disjoint. 1342 generate_conjoint_copy(aligned, 1, false); 1343 1344 return __ addr_at(start_off); 1345 } 1346 1347 1348 address generate_conjoint_short_copy(bool aligned, const char * name) { 1349 StubCodeMark mark(this, "StubRoutines", name); 1350 // This is the zarch specific stub generator for overlapping short array copy. 1351 // Refer to generate_conjoint_copy for a list of prereqs and features: 1352 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1353 address nooverlap_target = aligned ? StubRoutines::arrayof_jshort_disjoint_arraycopy() 1354 : StubRoutines::jshort_disjoint_arraycopy(); 1355 1356 array_overlap_test(nooverlap_target, 1); // Branch away to nooverlap_target if disjoint. 1357 generate_conjoint_copy(aligned, 2, false); 1358 1359 return __ addr_at(start_off); 1360 } 1361 1362 address generate_conjoint_int_copy(bool aligned, const char * name) { 1363 StubCodeMark mark(this, "StubRoutines", name); 1364 // This is the zarch specific stub generator for overlapping int array copy. 1365 // Refer to generate_conjoint_copy for a list of prereqs and features: 1366 1367 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1368 address nooverlap_target = aligned ? StubRoutines::arrayof_jint_disjoint_arraycopy() 1369 : StubRoutines::jint_disjoint_arraycopy(); 1370 1371 array_overlap_test(nooverlap_target, 2); // Branch away to nooverlap_target if disjoint. 1372 generate_conjoint_copy(aligned, 4, false); 1373 1374 return __ addr_at(start_off); 1375 } 1376 1377 address generate_conjoint_long_copy(bool aligned, const char * name) { 1378 StubCodeMark mark(this, "StubRoutines", name); 1379 // This is the zarch specific stub generator for overlapping long array copy. 1380 // Refer to generate_conjoint_copy for a list of prereqs and features: 1381 1382 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1383 address nooverlap_target = aligned ? 
StubRoutines::arrayof_jlong_disjoint_arraycopy() 1384 : StubRoutines::jlong_disjoint_arraycopy(); 1385 1386 array_overlap_test(nooverlap_target, 3); // Branch away to nooverlap_target if disjoint. 1387 generate_conjoint_copy(aligned, 8, false); 1388 1389 return __ addr_at(start_off); 1390 } 1391 1392 address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { 1393 StubCodeMark mark(this, "StubRoutines", name); 1394 // This is the zarch specific stub generator for overlapping oop array copy. 1395 // Refer to generate_conjoint_copy for a list of prereqs and features. 1396 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1397 unsigned int size = UseCompressedOops ? 4 : 8; 1398 unsigned int shift = UseCompressedOops ? 2 : 3; 1399 1400 address nooverlap_target = aligned ? StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized) 1401 : StubRoutines::oop_disjoint_arraycopy(dest_uninitialized); 1402 1403 // Branch to disjoint_copy (if applicable) before pre_barrier to avoid double pre_barrier. 1404 array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint. 1405 1406 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 1407 if (dest_uninitialized) { 1408 decorators |= IS_DEST_UNINITIALIZED; 1409 } 1410 if (aligned) { 1411 decorators |= ARRAYCOPY_ALIGNED; 1412 } 1413 1414 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 1415 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, Z_ARG1, Z_ARG2, Z_ARG3); 1416 1417 generate_conjoint_copy(aligned, size, true); // Must preserve ARG2, ARG3. 1418 1419 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, Z_ARG2, Z_ARG3, true); 1420 1421 return __ addr_at(start_off); 1422 } 1423 1424 1425 void generate_arraycopy_stubs() { 1426 1427 // Note: the disjoint stubs must be generated first, some of 1428 // the conjoint stubs use them. 
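    // Each conjoint stub begins with array_overlap_test(), which branches to the
    // corresponding disjoint stub generated above when the operands do not overlap
    // destructively; only the destructive-overlap case is handled by the conjoint
    // (reverse copy) code itself.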
1429 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy (false, "jbyte_disjoint_arraycopy"); 1430 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy"); 1431 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy (false, "jint_disjoint_arraycopy"); 1432 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy (false, "jlong_disjoint_arraycopy"); 1433 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy (false, "oop_disjoint_arraycopy", false); 1434 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (false, "oop_disjoint_arraycopy_uninit", true); 1435 1436 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy (true, "arrayof_jbyte_disjoint_arraycopy"); 1437 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy"); 1438 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy (true, "arrayof_jint_disjoint_arraycopy"); 1439 StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy (true, "arrayof_jlong_disjoint_arraycopy"); 1440 StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy (true, "arrayof_oop_disjoint_arraycopy", false); 1441 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (true, "arrayof_oop_disjoint_arraycopy_uninit", true); 1442 1443 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy (false, "jbyte_arraycopy"); 1444 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy"); 1445 StubRoutines::_jint_arraycopy = generate_conjoint_int_copy (false, "jint_arraycopy"); 1446 StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy (false, "jlong_arraycopy"); 1447 StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy (false, "oop_arraycopy", false); 1448 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy (false, "oop_arraycopy_uninit", true); 1449 1450 StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy (true, "arrayof_jbyte_arraycopy"); 1451 StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy"); 1452 StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy (true, "arrayof_jint_arraycopy"); 1453 StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy (true, "arrayof_jlong_arraycopy"); 1454 StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy (true, "arrayof_oop_arraycopy", false); 1455 StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy (true, "arrayof_oop_arraycopy_uninit", true); 1456 } 1457 1458 // Call interface for AES_encryptBlock, AES_decryptBlock stubs. 1459 // 1460 // Z_ARG1 - source data block. Ptr to leftmost byte to be processed. 1461 // Z_ARG2 - destination data block. Ptr to leftmost byte to be stored. 1462 // For in-place encryption/decryption, ARG1 and ARG2 can point 1463 // to the same piece of storage. 1464 // Z_ARG3 - Crypto key address (expanded key). The first n bits of 1465 // the expanded key constitute the original AES-<n> key (see below). 1466 // 1467 // Z_RET - return value. First unprocessed byte offset in src buffer. 1468 // 1469 // Some remarks: 1470 // The crypto key, as passed from the caller to these encryption stubs, 1471 // is a so-called expanded key. 
It is derived from the original key 1472 // by the Rijndael key schedule, see http://en.wikipedia.org/wiki/Rijndael_key_schedule 1473 // With the expanded key, the cipher/decipher task is decomposed in 1474 // multiple, less complex steps, called rounds. Sun SPARC and Intel 1475 // processors obviously implement support for those less complex steps. 1476 // z/Architecture provides instructions for full cipher/decipher complexity. 1477 // Therefore, we need the original, not the expanded key here. 1478 // Luckily, the first n bits of an AES-<n> expanded key are formed 1479 // by the original key itself. That takes us out of trouble. :-) 1480 // The key length (in bytes) relation is as follows: 1481 // original expanded rounds key bit keylen 1482 // key bytes key bytes length in words 1483 // 16 176 11 128 44 1484 // 24 208 13 192 52 1485 // 32 240 15 256 60 1486 // 1487 // The crypto instructions used in the AES* stubs have some specific register requirements. 1488 // Z_R0 holds the crypto function code. Please refer to the KM/KMC instruction 1489 // description in the "z/Architecture Principles of Operation" manual for details. 1490 // Z_R1 holds the parameter block address. The parameter block contains the cryptographic key 1491 // (KM instruction) and the chaining value (KMC instruction). 1492 // dst must designate an even-numbered register, holding the address of the output message. 1493 // src must designate an even/odd register pair, holding the address/length of the original message 1494 1495 // Helper function which generates code to 1496 // - load the function code in register fCode (== Z_R0). 1497 // - load the data block length (depends on cipher function) into register srclen if requested. 1498 // - is_decipher switches between cipher/decipher function codes 1499 // - set_len requests (if true) loading the data block length in register srclen 1500 void generate_load_AES_fCode(Register keylen, Register fCode, Register srclen, bool is_decipher) { 1501 1502 BLOCK_COMMENT("Set fCode {"); { 1503 Label fCode_set; 1504 int mode = is_decipher ? VM_Version::CipherMode::decipher : VM_Version::CipherMode::cipher; 1505 bool identical_dataBlk_len = (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES192_dataBlk) 1506 && (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES256_dataBlk); 1507 // Expanded key length is 44/52/60 * 4 bytes for AES-128/AES-192/AES-256. 1508 __ z_cghi(keylen, 52); // Check only once at the beginning. keylen and fCode may share the same register. 1509 1510 __ z_lghi(fCode, VM_Version::Cipher::_AES128 + mode); 1511 if (!identical_dataBlk_len) { 1512 __ z_lghi(srclen, VM_Version::Cipher::_AES128_dataBlk); 1513 } 1514 __ z_brl(fCode_set); // keyLen < 52: AES128 1515 1516 __ z_lghi(fCode, VM_Version::Cipher::_AES192 + mode); 1517 if (!identical_dataBlk_len) { 1518 __ z_lghi(srclen, VM_Version::Cipher::_AES192_dataBlk); 1519 } 1520 __ z_bre(fCode_set); // keyLen == 52: AES192 1521 1522 __ z_lghi(fCode, VM_Version::Cipher::_AES256 + mode); 1523 if (!identical_dataBlk_len) { 1524 __ z_lghi(srclen, VM_Version::Cipher::_AES256_dataBlk); 1525 } 1526 // __ z_brh(fCode_set); // keyLen < 52: AES128 // fallthru 1527 1528 __ bind(fCode_set); 1529 if (identical_dataBlk_len) { 1530 __ z_lghi(srclen, VM_Version::Cipher::_AES128_dataBlk); 1531 } 1532 } 1533 BLOCK_COMMENT("} Set fCode"); 1534 } 1535 1536 // Push a parameter block for the cipher/decipher instruction on the stack. 
1537 // Layout of the additional stack space allocated for AES_cipherBlockChaining:
1538 //
1539 //   |        |
1540 //   +--------+ <-- SP before expansion
1541 //   |        |
1542 //   :        :  alignment loss (part 2), 0..(AES_parmBlk_align-1) bytes
1543 //   |        |
1544 //   +--------+
1545 //   |        |
1546 //   :        :  space for parameter block, size VM_Version::Cipher::_AES*_parmBlk_C
1547 //   |        |
1548 //   +--------+ <-- parmBlk, octoword-aligned, start of parameter block
1549 //   |        |
1550 //   :        :  additional stack space for spills etc., size AES_parmBlk_addspace, DW @ Z_SP not usable!!!
1551 //   |        |
1552 //   +--------+ <-- Z_SP + alignment loss, octoword-aligned
1553 //   |        |
1554 //   :        :  alignment loss (part 1), 0..(AES_parmBlk_align-1) bytes. DW @ Z_SP not usable!!!
1555 //   |        |
1556 //   +--------+ <-- Z_SP after expansion
1557
1558   void generate_push_Block(int dataBlk_len, int parmBlk_len, int crypto_fCode,
1559                            Register parmBlk, Register keylen, Register fCode, Register cv, Register key) {
1560
1561     AES_parmBlk_addspace = AES_parmBlk_align; // Must be multiple of AES_parmBlk_align.
1562                                               // spill space for regs etc., don't use DW @SP!
1563     const int cv_len     = dataBlk_len;
1564     const int key_len    = parmBlk_len - cv_len;
1565     // This len must be known at JIT compile time. Only then are we able to recalc the SP before resize.
1566     // We buy this knowledge by wasting some (up to AES_parmBlk_align) bytes of stack space.
1567     const int resize_len = cv_len + key_len + AES_parmBlk_align + AES_parmBlk_addspace;
1568
1569     // Use parmBlk as temp reg here to hold the frame pointer.
1570     __ resize_frame(-resize_len, parmBlk, true);
1571
1572     // calculate parmBlk address from updated (resized) SP.
1573     __ add2reg(parmBlk, resize_len - (cv_len + key_len), Z_SP);
1574     __ z_nill(parmBlk, (~(AES_parmBlk_align-1)) & 0xffff);  // Align parameter block.
1575
1576     // There is room for stuff in the range [parmBlk-AES_parmBlk_addspace+8, parmBlk).
1577     __ z_stg(keylen, -8, parmBlk);                          // Spill keylen for later use.
1578
1579     // calculate (SP before resize) from updated SP.
1580     __ add2reg(keylen, resize_len, Z_SP);                   // keylen holds prev SP for now.
1581     __ z_stg(keylen, -16, parmBlk);                         // Spill prev SP for easy revert.
1582
1583     __ z_mvc(0, cv_len-1, parmBlk, 0, cv);                  // Copy cv.
1584     __ z_mvc(cv_len, key_len-1, parmBlk, 0, key);           // Copy key.
1585     __ z_lghi(fCode, crypto_fCode);
1586   }
1587
1588   // NOTE:
1589   //   Before returning, the stub has to copy the chaining value from
1590   //   the parmBlk, where it was updated by the crypto instruction, back
1591   //   to the chaining value array whose address was passed in the cv argument.
1592   //   As all the available registers are used and modified by KMC, we need to save
1593   //   the key length across the KMC instruction. We do so by spilling it to the stack,
1594   //   just preceding the parmBlk (at (parmBlk - 8)).
1595   void generate_push_parmBlk(Register keylen, Register fCode, Register parmBlk, Register key, Register cv, bool is_decipher) {
1596     int          mode = is_decipher ? VM_Version::CipherMode::decipher : VM_Version::CipherMode::cipher;
1597     Label        parmBlk_128, parmBlk_192, parmBlk_256, parmBlk_set;
1598
1599     BLOCK_COMMENT("push parmBlk {");
1600     // We have just three cipher strengths, which translate into three
1601     // possible expanded key lengths: 44, 52, and 60 four-byte words.
1602     // We therefore can compare the actual length against the "middle" length
1603     // and get: lt -> len=44, eq -> len=52, gt -> len=60.
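    // Illustrative example of the dispatch below: a 192-bit key expands to 52 four-byte words,
    // so the CGHI sets condition code "equal" and the BRE branch selects parmBlk_192.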
1604 __ z_cghi(keylen, 52); 1605 if (VM_Version::has_Crypto_AES128()) { __ z_brl(parmBlk_128); } // keyLen < 52: AES128 1606 if (VM_Version::has_Crypto_AES192()) { __ z_bre(parmBlk_192); } // keyLen == 52: AES192 1607 if (VM_Version::has_Crypto_AES256()) { __ z_brh(parmBlk_256); } // keyLen > 52: AES256 1608 1609 // Security net: requested AES function not available on this CPU. 1610 // NOTE: 1611 // As of now (March 2015), this safety net is not required. JCE policy files limit the 1612 // cryptographic strength of the keys used to 128 bit. If we have AES hardware support 1613 // at all, we have at least AES-128. 1614 __ stop_static("AES key strength not supported by CPU. Use -XX:-UseAES as remedy.", 0); 1615 1616 if (VM_Version::has_Crypto_AES256()) { 1617 __ bind(parmBlk_256); 1618 generate_push_Block(VM_Version::Cipher::_AES256_dataBlk, 1619 VM_Version::Cipher::_AES256_parmBlk_C, 1620 VM_Version::Cipher::_AES256 + mode, 1621 parmBlk, keylen, fCode, cv, key); 1622 if (VM_Version::has_Crypto_AES128() || VM_Version::has_Crypto_AES192()) { 1623 __ z_bru(parmBlk_set); // Fallthru otherwise. 1624 } 1625 } 1626 1627 if (VM_Version::has_Crypto_AES192()) { 1628 __ bind(parmBlk_192); 1629 generate_push_Block(VM_Version::Cipher::_AES192_dataBlk, 1630 VM_Version::Cipher::_AES192_parmBlk_C, 1631 VM_Version::Cipher::_AES192 + mode, 1632 parmBlk, keylen, fCode, cv, key); 1633 if (VM_Version::has_Crypto_AES128()) { 1634 __ z_bru(parmBlk_set); // Fallthru otherwise. 1635 } 1636 } 1637 1638 if (VM_Version::has_Crypto_AES128()) { 1639 __ bind(parmBlk_128); 1640 generate_push_Block(VM_Version::Cipher::_AES128_dataBlk, 1641 VM_Version::Cipher::_AES128_parmBlk_C, 1642 VM_Version::Cipher::_AES128 + mode, 1643 parmBlk, keylen, fCode, cv, key); 1644 // Fallthru 1645 } 1646 1647 __ bind(parmBlk_set); 1648 BLOCK_COMMENT("} push parmBlk"); 1649 } 1650 1651 // Pop a parameter block from the stack. The chaining value portion of the parameter block 1652 // is copied back to the cv array as it is needed for subsequent cipher steps. 1653 // The keylen value as well as the original SP (before resizing) was pushed to the stack 1654 // when pushing the parameter block. 1655 void generate_pop_parmBlk(Register keylen, Register parmBlk, Register key, Register cv) { 1656 1657 BLOCK_COMMENT("pop parmBlk {"); 1658 bool identical_dataBlk_len = (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES192_dataBlk) && 1659 (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES256_dataBlk); 1660 if (identical_dataBlk_len) { 1661 int cv_len = VM_Version::Cipher::_AES128_dataBlk; 1662 __ z_mvc(0, cv_len-1, cv, 0, parmBlk); // Copy cv. 1663 } else { 1664 int cv_len; 1665 Label parmBlk_128, parmBlk_192, parmBlk_256, parmBlk_set; 1666 __ z_lg(keylen, -8, parmBlk); // restore keylen 1667 __ z_cghi(keylen, 52); 1668 if (VM_Version::has_Crypto_AES256()) __ z_brh(parmBlk_256); // keyLen > 52: AES256 1669 if (VM_Version::has_Crypto_AES192()) __ z_bre(parmBlk_192); // keyLen == 52: AES192 1670 // if (VM_Version::has_Crypto_AES128()) __ z_brl(parmBlk_128); // keyLen < 52: AES128 // fallthru 1671 1672 // Security net: there is no one here. If we would need it, we should have 1673 // fallen into it already when pushing the parameter block. 1674 if (VM_Version::has_Crypto_AES128()) { 1675 __ bind(parmBlk_128); 1676 cv_len = VM_Version::Cipher::_AES128_dataBlk; 1677 __ z_mvc(0, cv_len-1, cv, 0, parmBlk); // Copy cv. 
1678 if (VM_Version::has_Crypto_AES192() || VM_Version::has_Crypto_AES256()) { 1679 __ z_bru(parmBlk_set); 1680 } 1681 } 1682 1683 if (VM_Version::has_Crypto_AES192()) { 1684 __ bind(parmBlk_192); 1685 cv_len = VM_Version::Cipher::_AES192_dataBlk; 1686 __ z_mvc(0, cv_len-1, cv, 0, parmBlk); // Copy cv. 1687 if (VM_Version::has_Crypto_AES256()) { 1688 __ z_bru(parmBlk_set); 1689 } 1690 } 1691 1692 if (VM_Version::has_Crypto_AES256()) { 1693 __ bind(parmBlk_256); 1694 cv_len = VM_Version::Cipher::_AES256_dataBlk; 1695 __ z_mvc(0, cv_len-1, cv, 0, parmBlk); // Copy cv. 1696 // __ z_bru(parmBlk_set); // fallthru 1697 } 1698 __ bind(parmBlk_set); 1699 } 1700 __ z_lg(Z_SP, -16, parmBlk); // Revert resize_frame_absolute. Z_SP saved by push_parmBlk. 1701 BLOCK_COMMENT("} pop parmBlk"); 1702 } 1703 1704 // Compute AES encrypt/decrypt function. 1705 void generate_AES_cipherBlock(bool is_decipher) { 1706 // Incoming arguments. 1707 Register from = Z_ARG1; // source byte array 1708 Register to = Z_ARG2; // destination byte array 1709 Register key = Z_ARG3; // expanded key array 1710 1711 const Register keylen = Z_R0; // Temporarily (until fCode is set) holds the expanded key array length. 1712 1713 // Register definitions as required by KM instruction. 1714 const Register fCode = Z_R0; // crypto function code 1715 const Register parmBlk = Z_R1; // parameter block address (points to crypto key) 1716 const Register src = Z_ARG1; // Must be even reg (KM requirement). 1717 const Register srclen = Z_ARG2; // Must be odd reg and pair with src. Overwrites destination address. 1718 const Register dst = Z_ARG3; // Must be even reg (KM requirement). Overwrites expanded key address. 1719 1720 // Read key len of expanded key (in 4-byte words). 1721 __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 1722 1723 // Copy arguments to registers as required by crypto instruction. 1724 __ z_lgr(parmBlk, key); // crypto key (in T_INT array). 1725 __ lgr_if_needed(src, from); // Copy src address. Will not emit, src/from are identical. 1726 __ z_lgr(dst, to); // Copy dst address, even register required. 1727 1728 // Construct function code into fCode(Z_R0), data block length into srclen(Z_ARG2). 1729 generate_load_AES_fCode(keylen, fCode, srclen, is_decipher); 1730 1731 __ km(dst, src); // Cipher the message. 1732 1733 __ z_br(Z_R14); 1734 } 1735 1736 // Compute AES encrypt function. 1737 address generate_AES_encryptBlock(const char* name) { 1738 __ align(CodeEntryAlignment); 1739 StubCodeMark mark(this, "StubRoutines", name); 1740 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1741 1742 generate_AES_cipherBlock(false); 1743 1744 return __ addr_at(start_off); 1745 } 1746 1747 // Compute AES decrypt function. 1748 address generate_AES_decryptBlock(const char* name) { 1749 __ align(CodeEntryAlignment); 1750 StubCodeMark mark(this, "StubRoutines", name); 1751 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1752 1753 generate_AES_cipherBlock(true); 1754 1755 return __ addr_at(start_off); 1756 } 1757 1758 // These stubs receive the addresses of the cryptographic key and of the chaining value as two separate 1759 // arguments (registers "key" and "cv", respectively). The KMC instruction, on the other hand, requires 1760 // chaining value and key to be, in this sequence, adjacent in storage. Thus, we need to allocate some 1761 // thread-local working storage. 
Using heap memory incurs all the hassles of allocating/freeing. 1762 // Stack space, on the contrary, is deallocated automatically when we return from the stub to the caller. 1763 // *** WARNING *** 1764 // Please note that we do not formally allocate stack space, nor do we 1765 // update the stack pointer. Therefore, no function calls are allowed 1766 // and nobody else must use the stack range where the parameter block 1767 // is located. 1768 // We align the parameter block to the next available octoword. 1769 // 1770 // Compute chained AES encrypt function. 1771 void generate_AES_cipherBlockChaining(bool is_decipher) { 1772 1773 Register from = Z_ARG1; // source byte array (clear text) 1774 Register to = Z_ARG2; // destination byte array (ciphered) 1775 Register key = Z_ARG3; // expanded key array. 1776 Register cv = Z_ARG4; // chaining value 1777 const Register msglen = Z_ARG5; // Total length of the msg to be encrypted. Value must be returned 1778 // in Z_RET upon completion of this stub. Is 32-bit integer. 1779 1780 const Register keylen = Z_R0; // Expanded key length, as read from key array. Temp only. 1781 const Register fCode = Z_R0; // crypto function code 1782 const Register parmBlk = Z_R1; // parameter block address (points to crypto key) 1783 const Register src = Z_ARG1; // is Z_R2 1784 const Register srclen = Z_ARG2; // Overwrites destination address. 1785 const Register dst = Z_ARG3; // Overwrites key address. 1786 1787 // Read key len of expanded key (in 4-byte words). 1788 __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 1789 1790 // Construct parm block address in parmBlk (== Z_R1), copy cv and key to parm block. 1791 // Construct function code in fCode (Z_R0). 1792 generate_push_parmBlk(keylen, fCode, parmBlk, key, cv, is_decipher); 1793 1794 // Prepare other registers for instruction. 1795 __ lgr_if_needed(src, from); // Copy src address. Will not emit, src/from are identical. 1796 __ z_lgr(dst, to); 1797 __ z_llgfr(srclen, msglen); // We pass the offsets as ints, not as longs as required. 1798 1799 __ kmc(dst, src); // Cipher the message. 1800 1801 generate_pop_parmBlk(keylen, parmBlk, key, cv); 1802 1803 __ z_llgfr(Z_RET, msglen); // We pass the offsets as ints, not as longs as required. 1804 __ z_br(Z_R14); 1805 } 1806 1807 // Compute chained AES encrypt function. 1808 address generate_cipherBlockChaining_AES_encrypt(const char* name) { 1809 __ align(CodeEntryAlignment); 1810 StubCodeMark mark(this, "StubRoutines", name); 1811 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1812 1813 generate_AES_cipherBlockChaining(false); 1814 1815 return __ addr_at(start_off); 1816 } 1817 1818 // Compute chained AES decrypt function. 1819 address generate_cipherBlockChaining_AES_decrypt(const char* name) { 1820 __ align(CodeEntryAlignment); 1821 StubCodeMark mark(this, "StubRoutines", name); 1822 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 1823 1824 generate_AES_cipherBlockChaining(true); 1825 1826 return __ addr_at(start_off); 1827 } 1828 1829 1830 // ***************************************************************************** 1831 1832 // AES CounterMode 1833 // Push a parameter block for the cipher/decipher instruction on the stack. 
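  // Summary of the layout that follows: kmctr consumes one 16-byte counter value per data block.
  // To cipher several blocks with a single instruction execution, the stub pre-builds a vector of
  // AES_ctrVec_len consecutive counter values on the stack, located right behind the parameter block.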
1834 // Layout of the additional stack space allocated for counterMode_AES_cipherBlock 1835 // 1836 // | | 1837 // +--------+ <-- SP before expansion 1838 // | | 1839 // : : alignment loss (part 2), 0..(AES_parmBlk_align-1) bytes. 1840 // | | 1841 // +--------+ <-- gap = parmBlk + parmBlk_len + ctrArea_len 1842 // | | 1843 // : : byte[] ctr - kmctr expects a counter vector the size of the input vector. 1844 // : : The interface only provides byte[16] iv, the init vector. 1845 // : : The size of this area is a tradeoff between stack space, init effort, and speed. 1846 // | | Each counter is a 128bit int. Vector element [0] is a copy of iv. 1847 // | | Vector element [i] is formed by incrementing element [i-1]. 1848 // +--------+ <-- ctr = parmBlk + parmBlk_len 1849 // | | 1850 // : : space for parameter block, size VM_Version::Cipher::_AES*_parmBlk_G 1851 // | | 1852 // +--------+ <-- parmBlk = Z_SP + (alignment loss (part 1+2)) + AES_dataBlk_space + AES_parmBlk_addSpace, octoword-aligned, start of parameter block 1853 // | | 1854 // : : additional stack space for spills etc., min. size AES_parmBlk_addspace, all bytes usable. 1855 // | | 1856 // +--------+ <-- Z_SP + alignment loss (part 1+2) + AES_dataBlk_space, octoword-aligned 1857 // | | 1858 // : : space for one source data block and one dest data block. 1859 // | | 1860 // +--------+ <-- Z_SP + alignment loss (part 1+2), octoword-aligned 1861 // | | 1862 // : : additional alignment loss. Blocks above can't tolerate unusable DW @SP. 1863 // | | 1864 // +--------+ <-- Z_SP + alignment loss (part 1), octoword-aligned 1865 // | | 1866 // : : alignment loss (part 1), 0..(AES_parmBlk_align-1) bytes. DW @ Z_SP holds frame ptr. 1867 // | | 1868 // +--------+ <-- Z_SP after expansion 1869 // 1870 // additional space allocation (per DW): 1871 // spillSpace = parmBlk - AES_parmBlk_addspace 1872 // dataBlocks = spillSpace - AES_dataBlk_space 1873 // 1874 // parmBlk-8 various fields of various lengths 1875 // parmBlk-1: key_len (only one byte is stored at parmBlk-1) 1876 // parmBlk-2: fCode (only one byte is stored at parmBlk-2) 1877 // parmBlk-4: ctrVal_len (as retrieved from iv array), in bytes, as HW 1878 // parmBlk-8: msglen length (in bytes) of crypto msg, as passed in by caller 1879 // return value is calculated from this: rv = msglen - processed. 1880 // parmBlk-16 old_SP (SP before resize) 1881 // parmBlk-24 temp values 1882 // up to and including main loop in generate_counterMode_AES 1883 // - parmBlk-20: remmsg_len remaining msg len (aka unprocessed msg bytes) 1884 // after main loop in generate_counterMode_AES 1885 // - parmBlk-24: spill slot for various address values 1886 // 1887 // parmBlk-40 free spill slot, used for local spills. 
1888 // parmBlk-64 ARG2(dst) ptr spill slot 1889 // parmBlk-56 ARG3(crypto key) ptr spill slot 1890 // parmBlk-48 ARG4(icv value) ptr spill slot 1891 // 1892 // parmBlk-72 1893 // parmBlk-80 1894 // parmBlk-88 counter vector current position 1895 // parmBlk-96 reduced msg len (after preLoop processing) 1896 // 1897 // parmBlk-104 Z_R13 spill slot (preLoop only) 1898 // parmBlk-112 Z_R12 spill slot (preLoop only) 1899 // parmBlk-120 Z_R11 spill slot (preLoop only) 1900 // parmBlk-128 Z_R10 spill slot (preLoop only) 1901 // 1902 // 1903 // Layout of the parameter block (instruction KMCTR, function KMCTR-AES* 1904 // 1905 // +--------+ key_len: +16 (AES-128), +24 (AES-192), +32 (AES-256) 1906 // | | 1907 // | | cryptographic key 1908 // | | 1909 // +--------+ <-- parmBlk 1910 // 1911 // On exit: 1912 // Z_SP points to resized frame 1913 // Z_SP before resize available from -16(parmBlk) 1914 // parmBlk points to crypto instruction parameter block 1915 // parameter block is filled with crypto key. 1916 // msglen unchanged, saved for later at -24(parmBlk) 1917 // fCode contains function code for instruction 1918 // key unchanged 1919 // 1920 void generate_counterMode_prepare_Stack(Register parmBlk, Register ctr, Register counter, Register scratch) { 1921 1922 BLOCK_COMMENT("prepare stack counterMode_AESCrypt {"); 1923 1924 // save argument registers. 1925 // ARG1(from) is Z_RET as well. Not saved or restored. 1926 // ARG5(msglen) is restored by other means. 1927 __ z_stmg(Z_ARG2, Z_ARG4, argsave_offset, parmBlk); 1928 1929 assert(AES_ctrVec_len > 0, "sanity. We need a counter vector"); 1930 __ add2reg(counter, AES_parmBlk_align, parmBlk); // counter array is located behind crypto key. Available range is disp12 only. 1931 __ z_mvc(0, AES_ctrVal_len-1, counter, 0, ctr); // move first copy of iv 1932 for (int j = 1; j < AES_ctrVec_len; j+=j) { // j (and amount of moved data) doubles with every iteration 1933 int offset = j * AES_ctrVal_len; 1934 if (offset <= 256) { 1935 __ z_mvc(offset, offset-1, counter, 0, counter); // move iv 1936 } else { 1937 for (int k = 0; k < offset; k += 256) { 1938 __ z_mvc(offset+k, 255, counter, 0, counter); 1939 } 1940 } 1941 } 1942 1943 Label noCarry, done; 1944 __ z_lg(scratch, Address(ctr, 8)); // get low-order DW of initial counter. 1945 __ z_algfi(scratch, AES_ctrVec_len); // check if we will overflow during init. 1946 __ z_brc(Assembler::bcondLogNoCarry, noCarry); // No, 64-bit increment is sufficient. 1947 1948 for (int j = 1; j < AES_ctrVec_len; j++) { // start with j = 1; no need to add 0 to the first counter value. 1949 int offset = j * AES_ctrVal_len; 1950 generate_increment128(counter, offset, j, scratch); // increment iv by index value 1951 } 1952 __ z_bru(done); 1953 1954 __ bind(noCarry); 1955 for (int j = 1; j < AES_ctrVec_len; j++) { // start with j = 1; no need to add 0 to the first counter value. 
1956 int offset = j * AES_ctrVal_len; 1957 generate_increment64(counter, offset, j); // increment iv by index value 1958 } 1959 1960 __ bind(done); 1961 1962 BLOCK_COMMENT("} prepare stack counterMode_AESCrypt"); 1963 } 1964 1965 1966 void generate_counterMode_increment_ctrVector(Register parmBlk, Register counter, Register scratch, bool v0_only) { 1967 1968 BLOCK_COMMENT("increment ctrVector counterMode_AESCrypt {"); 1969 1970 __ add2reg(counter, AES_parmBlk_align, parmBlk); // ptr to counter array needs to be restored 1971 1972 if (v0_only) { 1973 int offset = 0; 1974 generate_increment128(counter, offset, AES_ctrVec_len, scratch); // increment iv by # vector elements 1975 } else { 1976 int j = 0; 1977 if (VM_Version::has_VectorFacility()) { 1978 bool first_call = true; 1979 for (; j < (AES_ctrVec_len - 3); j+=4) { // increment blocks of 4 iv elements 1980 int offset = j * AES_ctrVal_len; 1981 generate_increment128x4(counter, offset, AES_ctrVec_len, first_call); 1982 first_call = false; 1983 } 1984 } 1985 for (; j < AES_ctrVec_len; j++) { 1986 int offset = j * AES_ctrVal_len; 1987 generate_increment128(counter, offset, AES_ctrVec_len, scratch); // increment iv by # vector elements 1988 } 1989 } 1990 1991 BLOCK_COMMENT("} increment ctrVector counterMode_AESCrypt"); 1992 } 1993 1994 // IBM s390 (IBM z/Architecture, to be more exact) uses Big-Endian number representation. 1995 // Therefore, the bits are ordered from most significant to least significant. The address 1996 // of a number in memory points to its lowest location where the most significant bit is stored. 1997 void generate_increment64(Register counter, int offset, int increment) { 1998 __ z_algsi(offset + 8, counter, increment); // increment, no overflow check 1999 } 2000 2001 void generate_increment128(Register counter, int offset, int increment, Register scratch) { 2002 __ clear_reg(scratch); // prepare to add carry to high-order DW 2003 __ z_algsi(offset + 8, counter, increment); // increment low order DW 2004 __ z_alcg(scratch, Address(counter, offset)); // add carry to high-order DW 2005 __ z_stg(scratch, Address(counter, offset)); // store back 2006 } 2007 2008 void generate_increment128(Register counter, int offset, Register increment, Register scratch) { 2009 __ clear_reg(scratch); // prepare to add carry to high-order DW 2010 __ z_alg(increment, Address(counter, offset + 8)); // increment low order DW 2011 __ z_stg(increment, Address(counter, offset + 8)); // store back 2012 __ z_alcg(scratch, Address(counter, offset)); // add carry to high-order DW 2013 __ z_stg(scratch, Address(counter, offset)); // store back 2014 } 2015 2016 // This is the vector variant of increment128, incrementing 4 ctr vector elements per call. 2017 void generate_increment128x4(Register counter, int offset, int increment, bool init) { 2018 VectorRegister Vincr = Z_V16; 2019 VectorRegister Vctr0 = Z_V20; 2020 VectorRegister Vctr1 = Z_V21; 2021 VectorRegister Vctr2 = Z_V22; 2022 VectorRegister Vctr3 = Z_V23; 2023 2024 // Initialize the increment value only once for a series of increments. 2025 // It must be assured that the non-initializing generator calls are 2026 // immediately subsequent. Otherwise, there is no guarantee for Vincr to be unchanged. 
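    // Illustrative effect (assuming AES_ctrVal_len == 16): one call updates the 64 bytes at
    // counter[offset..offset+63], i.e. four 128-bit counter values, adding 'increment' to each.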
2027     if (init) {
2028       __ z_vzero(Vincr);                       // preset VReg with constant increment
2029       __ z_vleih(Vincr, increment, 7);         // rightmost HW has ix = 7
2030     }
2031
2032     __ z_vlm(Vctr0, Vctr3, offset, counter);   // get the counter values
2033     __ z_vaq(Vctr0, Vctr0, Vincr);             // increment them
2034     __ z_vaq(Vctr1, Vctr1, Vincr);
2035     __ z_vaq(Vctr2, Vctr2, Vincr);
2036     __ z_vaq(Vctr3, Vctr3, Vincr);
2037     __ z_vstm(Vctr0, Vctr3, offset, counter);  // store the counter values
2038   }
2039
2040   unsigned int generate_counterMode_push_Block(int dataBlk_len, int parmBlk_len, int crypto_fCode,
2041                            Register parmBlk, Register msglen, Register fCode, Register key) {
2042
2043     // space for data blocks (src and dst, one each) for partial block processing
2044     AES_parmBlk_addspace = AES_stackSpace_incr            // spill space (temp data)
2045                          + AES_stackSpace_incr            // for argument save/restore
2046                          + AES_stackSpace_incr*2          // for work reg save/restore
2047                          ;
2048     AES_dataBlk_space    = roundup(2*dataBlk_len, AES_parmBlk_align);
2049     AES_dataBlk_offset   = -(AES_parmBlk_addspace+AES_dataBlk_space);
2050     const int key_len    = parmBlk_len;                   // The length of the unexpanded key (16, 24, or 32 bytes).
2051
2052     assert((AES_ctrVal_len == 0) || (AES_ctrVal_len == dataBlk_len), "varying dataBlk_len is not supported.");
2053     AES_ctrVal_len  = dataBlk_len;                        // ctr init value len (in bytes)
2054     AES_ctrArea_len = AES_ctrVec_len * AES_ctrVal_len;    // space required on stack for ctr vector
2055
2056     // This len must be known at JIT compile time. Only then are we able to recalc the SP before resize.
2057     // We buy this knowledge by wasting some (up to AES_parmBlk_align) bytes of stack space.
2058     const int resize_len = AES_parmBlk_align              // room for alignment of parmBlk
2059                          + AES_parmBlk_align              // extra room for alignment
2060                          + AES_dataBlk_space              // one src and one dst data blk
2061                          + AES_parmBlk_addspace           // spill space for local data
2062                          + roundup(parmBlk_len, AES_parmBlk_align) // aligned length of parmBlk
2063                          + AES_ctrArea_len                // stack space for ctr vector
2064                          ;
2065     Register scratch     = fCode;  // We can use fCode as a scratch register. Its contents on entry
2066                                    // are irrelevant; it is set at the very end of this code block.
2067
2068     assert(key_len < 256, "excessive crypto key len: %d, limit: 256", key_len);
2069
2070     BLOCK_COMMENT(err_msg("push_Block (%d bytes) counterMode_AESCrypt%d {", resize_len, parmBlk_len*8));
2071
2072     // After the frame is resized, the parmBlk is positioned such
2073     // that it is octoword-aligned. This potentially creates some
2074     // alignment waste in addspace and/or in the gap area.
2075     // After resize_frame, scratch contains the frame pointer.
2076     __ resize_frame(-resize_len, scratch, true);
2077 #ifdef ASSERT
2078     __ clear_mem(Address(Z_SP, (intptr_t)8), resize_len - 8);
2079 #endif
2080
2081     // calculate aligned parmBlk address from updated (resized) SP.
2082     __ add2reg(parmBlk, AES_parmBlk_addspace + AES_dataBlk_space + (2*AES_parmBlk_align-1), Z_SP);
2083     __ z_nill(parmBlk, (~(AES_parmBlk_align-1)) & 0xffff); // Align parameter block.
2084
2085     // There is room to spill stuff in the range [parmBlk-AES_parmBlk_addspace+8, parmBlk).
2086     __ z_mviy(keylen_offset, parmBlk, key_len - 1);        // Spill crypto key length for later use. Decrement by one for direct use with xc template.
2087     __ z_mviy(fCode_offset,  parmBlk, crypto_fCode);       // Crypto function code, will be loaded into Z_R0 later.
2088     __ z_sty(msglen, msglen_offset, parmBlk);              // full plaintext/ciphertext len.
2089 __ z_sty(msglen, msglen_red_offset, parmBlk); // save for main loop, may get updated in preLoop. 2090 __ z_sra(msglen, exact_log2(dataBlk_len)); // # full cipher blocks that can be formed from input text. 2091 __ z_sty(msglen, rem_msgblk_offset, parmBlk); 2092 2093 __ add2reg(scratch, resize_len, Z_SP); // calculate (SP before resize) from resized SP. 2094 __ z_stg(scratch, unextSP_offset, parmBlk); // Spill unextended SP for easy revert. 2095 __ z_stmg(Z_R10, Z_R13, regsave_offset, parmBlk); // make some regs available as work registers 2096 2097 // Fill parmBlk with all required data 2098 __ z_mvc(0, key_len-1, parmBlk, 0, key); // Copy key. Need to do it here - key_len is only known here. 2099 BLOCK_COMMENT(err_msg("} push_Block (%d bytes) counterMode_AESCrypt%d", resize_len, parmBlk_len*8)); 2100 return resize_len; 2101 } 2102 2103 2104 void generate_counterMode_pop_Block(Register parmBlk, Register msglen, Label& eraser) { 2105 // For added safety, clear the stack area where the crypto key was stored. 2106 Register scratch = msglen; 2107 assert_different_registers(scratch, Z_R0); // can't use Z_R0 for exrl. 2108 2109 // wipe out key on stack 2110 __ z_llgc(scratch, keylen_offset, parmBlk); // get saved (key_len-1) value (we saved just one byte!) 2111 __ z_exrl(scratch, eraser); // template relies on parmBlk still pointing to key on stack 2112 2113 // restore argument registers. 2114 // ARG1(from) is Z_RET as well. Not restored - will hold return value anyway. 2115 // ARG5(msglen) is restored further down. 2116 __ z_lmg(Z_ARG2, Z_ARG4, argsave_offset, parmBlk); 2117 2118 // restore work registers 2119 __ z_lmg(Z_R10, Z_R13, regsave_offset, parmBlk); // make some regs available as work registers 2120 2121 __ z_lgf(msglen, msglen_offset, parmBlk); // Restore msglen, only low order FW is valid 2122 #ifdef ASSERT 2123 { 2124 Label skip2last, skip2done; 2125 // Z_RET (aka Z_R2) can be used as scratch as well. It will be set from msglen before return. 2126 __ z_lgr(Z_RET, Z_SP); // save extended SP 2127 __ z_lg(Z_SP, unextSP_offset, parmBlk); // trim stack back to unextended size 2128 __ z_sgrk(Z_R1, Z_SP, Z_RET); 2129 2130 __ z_cghi(Z_R1, 256); 2131 __ z_brl(skip2last); 2132 __ z_xc(0, 255, Z_RET, 0, Z_RET); 2133 __ z_aghi(Z_RET, 256); 2134 __ z_aghi(Z_R1, -256); 2135 2136 __ z_cghi(Z_R1, 256); 2137 __ z_brl(skip2last); 2138 __ z_xc(0, 255, Z_RET, 0, Z_RET); 2139 __ z_aghi(Z_RET, 256); 2140 __ z_aghi(Z_R1, -256); 2141 2142 __ z_cghi(Z_R1, 256); 2143 __ z_brl(skip2last); 2144 __ z_xc(0, 255, Z_RET, 0, Z_RET); 2145 __ z_aghi(Z_RET, 256); 2146 __ z_aghi(Z_R1, -256); 2147 2148 __ bind(skip2last); 2149 __ z_lgr(Z_R0, Z_RET); 2150 __ z_aghik(Z_RET, Z_R1, -1); // decrement for exrl 2151 __ z_brl(skip2done); 2152 __ z_lgr(parmBlk, Z_R0); // parmBlk == Z_R1, used in eraser template 2153 __ z_exrl(Z_RET, eraser); 2154 2155 __ bind(skip2done); 2156 } 2157 #else 2158 __ z_lg(Z_SP, unextSP_offset, parmBlk); // trim stack back to unextended size 2159 #endif 2160 } 2161 2162 2163 int generate_counterMode_push_parmBlk(Register parmBlk, Register msglen, Register fCode, Register key, bool is_decipher) { 2164 int resize_len = 0; 2165 int mode = is_decipher ? VM_Version::CipherMode::decipher : VM_Version::CipherMode::cipher; 2166 Label parmBlk_128, parmBlk_192, parmBlk_256, parmBlk_set; 2167 Register keylen = fCode; // Expanded key length, as read from key array, Temp only. 2168 // use fCode as scratch; fCode receives its final value later. 2169 2170 // Read key len of expanded key (in 4-byte words). 
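    // keylen will be 44, 52, or 60 (4-byte words) for AES-128/192/256, see the key length table
    // in the AES_encryptBlock interface comment. Comparing against the middle value (52) below
    // therefore selects the matching parameter block setup.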
2171 __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2172 __ z_cghi(keylen, 52); 2173 if (VM_Version::has_Crypto_AES_CTR256()) { __ z_brh(parmBlk_256); } // keyLen > 52: AES256. Assume: most frequent 2174 if (VM_Version::has_Crypto_AES_CTR128()) { __ z_brl(parmBlk_128); } // keyLen < 52: AES128. 2175 if (VM_Version::has_Crypto_AES_CTR192()) { __ z_bre(parmBlk_192); } // keyLen == 52: AES192. Assume: least frequent 2176 2177 // Safety net: requested AES_CTR function for requested keylen not available on this CPU. 2178 __ stop_static("AES key strength not supported by CPU. Use -XX:-UseAESCTRIntrinsics as remedy.", 0); 2179 2180 if (VM_Version::has_Crypto_AES_CTR128()) { 2181 __ bind(parmBlk_128); 2182 resize_len = generate_counterMode_push_Block(VM_Version::Cipher::_AES128_dataBlk, 2183 VM_Version::Cipher::_AES128_parmBlk_G, 2184 VM_Version::Cipher::_AES128 + mode, 2185 parmBlk, msglen, fCode, key); 2186 if (VM_Version::has_Crypto_AES_CTR256() || VM_Version::has_Crypto_AES_CTR192()) { 2187 __ z_bru(parmBlk_set); // Fallthru otherwise. 2188 } 2189 } 2190 2191 if (VM_Version::has_Crypto_AES_CTR192()) { 2192 __ bind(parmBlk_192); 2193 resize_len = generate_counterMode_push_Block(VM_Version::Cipher::_AES192_dataBlk, 2194 VM_Version::Cipher::_AES192_parmBlk_G, 2195 VM_Version::Cipher::_AES192 + mode, 2196 parmBlk, msglen, fCode, key); 2197 if (VM_Version::has_Crypto_AES_CTR256()) { 2198 __ z_bru(parmBlk_set); // Fallthru otherwise. 2199 } 2200 } 2201 2202 if (VM_Version::has_Crypto_AES_CTR256()) { 2203 __ bind(parmBlk_256); 2204 resize_len = generate_counterMode_push_Block(VM_Version::Cipher::_AES256_dataBlk, 2205 VM_Version::Cipher::_AES256_parmBlk_G, 2206 VM_Version::Cipher::_AES256 + mode, 2207 parmBlk, msglen, fCode, key); 2208 // Fallthru 2209 } 2210 2211 __ bind(parmBlk_set); 2212 return resize_len; 2213 } 2214 2215 2216 void generate_counterMode_pop_parmBlk(Register parmBlk, Register msglen, Label& eraser) { 2217 2218 BLOCK_COMMENT("pop parmBlk counterMode_AESCrypt {"); 2219 2220 generate_counterMode_pop_Block(parmBlk, msglen, eraser); 2221 2222 BLOCK_COMMENT("} pop parmBlk counterMode_AESCrypt"); 2223 } 2224 2225 // Implementation of counter-mode AES encrypt/decrypt function. 2226 // 2227 void generate_counterMode_AES_impl(bool is_decipher) { 2228 2229 // On entry: 2230 // if there was a previous call to update(), and this previous call did not fully use 2231 // the current encrypted counter, that counter is available at arg6_Offset(Z_SP). 2232 // The index of the first unused bayte in the encrypted counter is available at arg7_Offset(Z_SP). 2233 // The index is in the range [1..AES_ctrVal_len] ([1..16]), where index == 16 indicates a fully 2234 // used previous encrypted counter. 2235 // The unencrypted counter has already been incremented and is ready to be used for the next 2236 // data block, after the unused bytes from the previous call have been consumed. 2237 // The unencrypted counter follows the "increment-after use" principle. 2238 2239 // On exit: 2240 // The index of the first unused byte of the encrypted counter is written back to arg7_Offset(Z_SP). 2241 // A value of AES_ctrVal_len (16) indicates there is no leftover byte. 2242 // If there is at least one leftover byte (1 <= index < AES_ctrVal_len), the encrypted counter value 2243 // is written back to arg6_Offset(Z_SP). If there is no leftover, nothing is written back. 2244 // The unencrypted counter value is written back after having been incremented. 
2245
2246     Register       from    = Z_ARG1; // byte[], source byte array (clear text)
2247     Register       to      = Z_ARG2; // byte[], destination byte array (ciphered)
2248     Register       key     = Z_ARG3; // byte[], expanded key array.
2249     Register       ctr     = Z_ARG4; // byte[], counter byte array.
2250     const Register msglen  = Z_ARG5; // int, total length of the msg to be encrypted. Value must be
2251                                      // returned in Z_RET upon completion of this stub.
2252                                      // This is a jint. Negative values are illegal, but technically possible.
2253                                      // Do not rely on the high word. Its contents are undefined.
2254            // encCtr   = Z_ARG6 - encrypted counter (byte array),
2255            //                     address passed on stack at _z_abi(remaining_cargs) + 0 * WordSize
2256            // cvIndex  = Z_ARG7 - # used (consumed) bytes of encrypted counter,
2257            //                     passed on stack at _z_abi(remaining_cargs) + 1 * WordSize
2258            //                     Caution: 4-byte value, right-justified in 8-byte stack word
2259
2260     const Register fCode   = Z_R0;   // crypto function code
2261     const Register parmBlk = Z_R1;   // parameter block address (points to crypto key)
2262     const Register src     = Z_ARG1; // is Z_R2, forms even/odd pair with srclen
2263     const Register srclen  = Z_ARG2; // Overwrites destination address.
2264     const Register dst     = Z_ARG3; // Overwrites key address.
2265     const Register counter = Z_ARG5; // Overwrites msglen. Must have counter array in an even register.
2266
2267     Label srcMover, dstMover, fromMover, ctrXOR, dataEraser;  // EXRL (execution) templates.
2268     Label CryptoLoop, CryptoLoop_doit, CryptoLoop_end, CryptoLoop_setupAndDoLast, CryptoLoop_ctrVal_inc;
2269     Label allDone, allDone_noInc, popAndExit, Exit;
2270
2271     int    arg6_Offset = _z_abi(remaining_cargs) + 0 * HeapWordSize;
2272     int    arg7_Offset = _z_abi(remaining_cargs) + 1 * HeapWordSize; // stack slot holds ptr to int value
2273     int   oldSP_Offset = 0;
2274
2275     // Is there anything to do at all? Protect against negative len as well.
2276     __ z_ltr(msglen, msglen);
2277     __ z_brnh(Exit);
2278
2279     // Expand stack, load parm block address into parmBlk (== Z_R1), copy crypto key to parm block.
2280     oldSP_Offset = generate_counterMode_push_parmBlk(parmBlk, msglen, fCode, key, is_decipher);
2281     arg6_Offset += oldSP_Offset;
2282     arg7_Offset += oldSP_Offset;
2283
2284     // Check if there is a leftover, partially used encrypted counter from last invocation.
2285     // If so, use those leftover counter bytes first before starting the "normal" encryption.
2286
2287     // We do not have access to the encrypted counter value. It is generated and used only
2288     // internally within the previous kmctr instruction. But, at the end of the call to this stub,
2289     // the last encrypted counter is extracted by ciphering a 0x00 byte stream. The result is
2290     // stored at the arg6 location for use with the subsequent call.
2291     //
2292     // The #used bytes of the encrypted counter (from a previous call) is provided via arg7.
2293     // It is used as an index into the encrypted counter to access the first byte available for ciphering.
2294     // To cipher the input text, we move the number of remaining bytes in the encrypted counter from
2295     // input to output. Then we simply XOR the output bytes with the associated encrypted counter bytes.
2296
2297     Register cvIxAddr  = Z_R10;                  // Address of index into encCtr. Preserved for use @CryptoLoop_end.
2298     __ z_lg(cvIxAddr, arg7_Offset, Z_SP);        // arg7: addr of field encCTR_index.
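    // Illustrative preLoop example: if the previous update() call left cvIndex == 10, then
    // 16 - 10 = 6 encrypted-counter bytes are still unused. The block below XORs at most those
    // 6 bytes with the leading input bytes before block-wise processing resumes.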
2299 2300 { 2301 Register cvUnused = Z_R11; // # unused bytes of encrypted counter value (= 16 - cvIndex) 2302 Register encCtr = Z_R12; // encrypted counter value, points to first ununsed byte. 2303 Register cvIndex = Z_R13; // # index of first unused byte of encrypted counter value 2304 Label preLoop_end; 2305 2306 // preLoop is necessary only if there is a partially used encrypted counter (encCtr). 2307 // Partially used means cvIndex is in [1, dataBlk_len-1]. 2308 // cvIndex == 0: encCtr is set up but not used at all. Should not occur. 2309 // cvIndex == dataBlk_len: encCtr is exhausted, all bytes used. 2310 // Using unsigned compare protects against cases where (cvIndex < 0). 2311 __ z_clfhsi(0, cvIxAddr, AES_ctrVal_len); // check #used bytes in encCtr against ctr len. 2312 __ z_brnl(preLoop_end); // if encCtr is fully used, skip to normal processing. 2313 __ z_ltgf(cvIndex, 0, Z_R0, cvIxAddr); // # used bytes in encCTR. 2314 __ z_brz(preLoop_end); // if encCtr has no used bytes, skip to normal processing. 2315 2316 __ z_lg(encCtr, arg6_Offset, Z_SP); // encrypted counter from last call to update() 2317 __ z_agr(encCtr, cvIndex); // now points to first unused byte 2318 2319 __ add2reg(cvUnused, -AES_ctrVal_len, cvIndex); // calculate #unused bytes in encCtr. 2320 __ z_lcgr(cvUnused, cvUnused); // previous checks ensure cvUnused in range [1, dataBlk_len-1] 2321 2322 __ z_lgf(msglen, msglen_offset, parmBlk); // Restore msglen (jint value) 2323 __ z_cr(cvUnused, msglen); // check if msg can consume all unused encCtr bytes 2324 __ z_locr(cvUnused, msglen, Assembler::bcondHigh); // take the shorter length 2325 __ z_aghi(cvUnused, -1); // decrement # unused bytes by 1 for exrl instruction 2326 // preceding checks ensure cvUnused in range [1, dataBlk_len-1] 2327 __ z_exrl(cvUnused, fromMover); 2328 __ z_exrl(cvUnused, ctrXOR); 2329 2330 __ z_aghi(cvUnused, 1); // revert decrement from above 2331 __ z_agr(cvIndex, cvUnused); // update index into encCtr (first unused byte) 2332 __ z_st(cvIndex, 0, cvIxAddr); // write back arg7, cvIxAddr is still valid 2333 2334 // update pointers and counters to prepare for main loop 2335 __ z_agr(from, cvUnused); 2336 __ z_agr(to, cvUnused); 2337 __ z_sr(msglen, cvUnused); // #bytes not yet processed 2338 __ z_sty(msglen, msglen_red_offset, parmBlk); // save for calculations in main loop 2339 __ z_srak(Z_R0, msglen, exact_log2(AES_ctrVal_len));// # full cipher blocks that can be formed from input text. 2340 __ z_sty(Z_R0, rem_msgblk_offset, parmBlk); 2341 2342 // check remaining msglen. If zero, all msg bytes were processed in preLoop. 2343 __ z_ltr(msglen, msglen); 2344 __ z_brnh(popAndExit); 2345 2346 __ bind(preLoop_end); 2347 } 2348 2349 // Create count vector on stack to accommodate up to AES_ctrVec_len blocks. 2350 generate_counterMode_prepare_Stack(parmBlk, ctr, counter, fCode); 2351 2352 // Prepare other registers for instruction. 2353 __ lgr_if_needed(src, from); // Copy src address. Will not emit, src/from are identical. 2354 __ z_lgr(dst, to); 2355 __ z_llgc(fCode, fCode_offset, Z_R0, parmBlk); 2356 2357 __ bind(CryptoLoop); 2358 __ z_lghi(srclen, AES_ctrArea_len); // preset len (#bytes) for next iteration: max possible. 2359 __ z_asi(rem_msgblk_offset, parmBlk, -AES_ctrVec_len); // decrement #remaining blocks (16 bytes each). 
Range: [+127..-128] 2360 __ z_brl(CryptoLoop_setupAndDoLast); // Handling the last iteration (using less than max #blocks) out-of-line 2361 2362 __ bind(CryptoLoop_doit); 2363 __ kmctr(dst, counter, src); // Cipher the message. 2364 2365 __ z_lt(srclen, rem_msgblk_offset, Z_R0, parmBlk); // check if this was the last iteration 2366 __ z_brz(CryptoLoop_ctrVal_inc); // == 0: ctrVector fully used. Need to increment the first 2367 // vector element to encrypt remaining unprocessed bytes. 2368 // __ z_brl(CryptoLoop_end); // < 0: this was detected before and handled at CryptoLoop_setupAndDoLast 2369 // > 0: this is the fallthru case, need another iteration 2370 2371 generate_counterMode_increment_ctrVector(parmBlk, counter, srclen, false); // srclen unused here (serves as scratch) 2372 __ z_bru(CryptoLoop); 2373 2374 __ bind(CryptoLoop_end); 2375 2376 // OK, when we arrive here, we have encrypted all of the "from" byte stream 2377 // except for the last few [0..dataBlk_len) bytes. In addition, we know that 2378 // there are no more unused bytes in the previously generated encrypted counter. 2379 // The (unencrypted) counter, however, is ready to use (it was incremented before). 2380 2381 // To encrypt the few remaining bytes, we need to form an extra src and dst 2382 // data block of dataBlk_len each. This is because we can only process full 2383 // blocks but we must not read or write beyond the boundaries of the argument 2384 // arrays. Here is what we do: 2385 // - The ctrVector has at least one unused element. This is ensured by CryptoLoop code. 2386 // - The (first) unused element is pointed at by the counter register. 2387 // - The src data block is filled with the remaining "from" bytes, remainder of block undefined. 2388 // - The single src data block is encrypted into the dst data block. 2389 // - The dst data block is copied into the "to" array, but only the leftmost few bytes 2390 // (as many as were left in the source byte stream). 2391 // - The counter value to be used is pointed at by the counter register. 2392 // - Fortunately, the crypto instruction (kmctr) has updated all related addresses such that 2393 // we know where to continue with "from" and "to" and which counter value to use next. 2394 2395 Register encCtr = Z_R12; // encrypted counter value, points to stub argument. 2396 Register tmpDst = Z_R12; // addr of temp destination (for last partial block encryption) 2397 2398 __ z_lgf(srclen, msglen_red_offset, parmBlk); // plaintext/ciphertext len after potential preLoop processing. 2399 __ z_nilf(srclen, AES_ctrVal_len - 1); // those rightmost bits indicate the unprocessed #bytes 2400 __ z_stg(srclen, localSpill_offset, parmBlk); // save for later reuse 2401 __ z_mvhi(0, cvIxAddr, 16); // write back arg7 (default 16 in case of allDone). 2402 __ z_braz(allDone_noInc); // no unprocessed bytes? Then we are done. 2403 // This also means the last block of data processed was 2404 // a full-sized block (AES_ctrVal_len bytes) which results 2405 // in no leftover encrypted counter bytes. 2406 __ z_st(srclen, 0, cvIxAddr); // This will be the index of the first unused byte in the encrypted counter. 2407 __ z_stg(counter, counter_offset, parmBlk); // save counter location for easy later restore 2408 2409 // calculate address (on stack) for final dst and src blocks. 2410 __ add2reg(tmpDst, AES_dataBlk_offset, parmBlk); // tmp dst (on stack) is right before tmp src 2411 2412 // We have a residue of [1..15] unprocessed bytes, srclen holds the exact number. 
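    // For instance, a reduced message length of 70 bytes leaves srclen = 70 & 15 = 6 residue bytes,
    // which are handled via the temporary data blocks on the stack.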
2413 // Residue == 0 was checked just above, residue == AES_ctrVal_len would be another 2414 // full-sized block and would have been handled by CryptoLoop. 2415 2416 __ add2reg(srclen, -1); // decrement for exrl 2417 __ z_exrl(srclen, srcMover); // copy remaining bytes of src byte stream 2418 __ load_const_optimized(srclen, AES_ctrVal_len); // kmctr processes only complete blocks 2419 __ add2reg(src, AES_ctrVal_len, tmpDst); // tmp dst is right before tmp src 2420 2421 __ kmctr(tmpDst, counter, src); // Cipher the remaining bytes. 2422 2423 __ add2reg(tmpDst, -AES_ctrVal_len, tmpDst); // restore tmp dst address 2424 __ z_lg(srclen, localSpill_offset, parmBlk); // residual len, saved above 2425 __ add2reg(srclen, -1); // decrement for exrl 2426 __ z_exrl(srclen, dstMover); 2427 2428 // Write back new encrypted counter 2429 __ add2reg(src, AES_dataBlk_offset, parmBlk); 2430 __ clear_mem(Address(src, RegisterOrConstant((intptr_t)0)), AES_ctrVal_len); 2431 __ load_const_optimized(srclen, AES_ctrVal_len); // kmctr processes only complete blocks 2432 __ z_lg(encCtr, arg6_Offset, Z_SP); // write encrypted counter to arg6 2433 __ z_lg(counter, counter_offset, parmBlk); // restore counter 2434 __ kmctr(encCtr, counter, src); 2435 2436 // The last used element of the counter vector contains the latest counter value that was used. 2437 // As described above, the counter value on exit must be the one to be used next. 2438 __ bind(allDone); 2439 __ z_lg(counter, counter_offset, parmBlk); // restore counter 2440 generate_increment128(counter, 0, 1, Z_R0); 2441 2442 __ bind(allDone_noInc); 2443 __ z_mvc(0, AES_ctrVal_len, ctr, 0, counter); 2444 2445 __ bind(popAndExit); 2446 generate_counterMode_pop_parmBlk(parmBlk, msglen, dataEraser); 2447 2448 __ bind(Exit); 2449 __ z_lgfr(Z_RET, msglen); 2450 2451 __ z_br(Z_R14); 2452 2453 //---------------------------- 2454 //---< out-of-line code >--- 2455 //---------------------------- 2456 __ bind(CryptoLoop_setupAndDoLast); 2457 __ z_lgf(srclen, rem_msgblk_offset, parmBlk); // remaining #blocks in memory is < 0 2458 __ z_aghi(srclen, AES_ctrVec_len); // recalculate the actually remaining #blocks 2459 __ z_sllg(srclen, srclen, exact_log2(AES_ctrVal_len)); // convert to #bytes. Counter value is same length as data block 2460 __ kmctr(dst, counter, src); // Cipher the last integral blocks of the message. 2461 __ z_bru(CryptoLoop_end); // There is at least one unused counter vector element. 2462 // no need to increment. 2463 2464 __ bind(CryptoLoop_ctrVal_inc); 2465 generate_counterMode_increment_ctrVector(parmBlk, counter, srclen, true); // srclen unused here (serves as scratch) 2466 __ z_bru(CryptoLoop_end); 2467 2468 //------------------------------------------- 2469 //---< execution templates for preLoop >--- 2470 //------------------------------------------- 2471 __ bind(fromMover); 2472 __ z_mvc(0, 0, to, 0, from); // Template instruction to move input data to dst. 2473 __ bind(ctrXOR); 2474 __ z_xc(0, 0, to, 0, encCtr); // Template instruction to XOR input data (now in to) with encrypted counter. 2475 2476 //------------------------------- 2477 //---< execution templates >--- 2478 //------------------------------- 2479 __ bind(dataEraser); 2480 __ z_xc(0, 0, parmBlk, 0, parmBlk); // Template instruction to erase crypto key on stack. 2481 __ bind(dstMover); 2482 __ z_mvc(0, 0, dst, 0, tmpDst); // Template instruction to move encrypted reminder from stack to dst. 
2483 __ bind(srcMover); 2484 __ z_mvc(AES_ctrVal_len, 0, tmpDst, 0, src); // Template instruction to move reminder of source byte stream to stack. 2485 } 2486 2487 2488 // Create two intrinsic variants, optimized for short and long plaintexts. 2489 void generate_counterMode_AES(bool is_decipher) { 2490 2491 const Register msglen = Z_ARG5; // int, Total length of the msg to be encrypted. Value must be 2492 // returned in Z_RET upon completion of this stub. 2493 const int threshold = 256; // above this length (in bytes), text is considered long. 2494 const int vec_short = threshold>>6; // that many blocks (16 bytes each) per iteration, max 4 loop iterations 2495 const int vec_long = threshold>>2; // that many blocks (16 bytes each) per iteration. 2496 2497 Label AESCTR_short, AESCTR_long; 2498 2499 __ z_chi(msglen, threshold); 2500 __ z_brh(AESCTR_long); 2501 2502 __ bind(AESCTR_short); 2503 2504 BLOCK_COMMENT(err_msg("counterMode_AESCrypt (text len <= %d, block size = %d) {", threshold, vec_short*16)); 2505 2506 AES_ctrVec_len = vec_short; 2507 generate_counterMode_AES_impl(false); // control of generated code will not return 2508 2509 BLOCK_COMMENT(err_msg("} counterMode_AESCrypt (text len <= %d, block size = %d)", threshold, vec_short*16)); 2510 2511 __ align(32); // Octoword alignment benefits branch targets. 2512 2513 BLOCK_COMMENT(err_msg("counterMode_AESCrypt (text len > %d, block size = %d) {", threshold, vec_long*16)); 2514 2515 __ bind(AESCTR_long); 2516 AES_ctrVec_len = vec_long; 2517 generate_counterMode_AES_impl(false); // control of generated code will not return 2518 2519 BLOCK_COMMENT(err_msg("} counterMode_AESCrypt (text len > %d, block size = %d)", threshold, vec_long*16)); 2520 } 2521 2522 2523 // Compute AES-CTR crypto function. 2524 // Encrypt or decrypt is selected via parameters. Only one stub is necessary. 2525 address generate_counterMode_AESCrypt(const char* name) { 2526 __ align(CodeEntryAlignment); 2527 StubCodeMark mark(this, "StubRoutines", name); 2528 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 2529 2530 generate_counterMode_AES(false); 2531 2532 return __ addr_at(start_off); 2533 } 2534 2535 // ***************************************************************************** 2536 2537 // Compute GHASH function. 2538 address generate_ghash_processBlocks() { 2539 __ align(CodeEntryAlignment); 2540 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 2541 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 2542 2543 const Register state = Z_ARG1; 2544 const Register subkeyH = Z_ARG2; 2545 const Register data = Z_ARG3; // 1st of even-odd register pair. 2546 const Register blocks = Z_ARG4; 2547 const Register len = blocks; // 2nd of even-odd register pair. 2548 2549 const int param_block_size = 4 * 8; 2550 const int frame_resize = param_block_size + 8; // Extra space for copy of fp. 2551 2552 // Reserve stack space for parameter block (R1). 2553 __ z_lgr(Z_R1, Z_SP); 2554 __ resize_frame(-frame_resize, Z_R0, true); 2555 __ z_aghi(Z_R1, -param_block_size); 2556 2557 // Fill parameter block. 2558 __ z_mvc(Address(Z_R1) , Address(state) , 16); 2559 __ z_mvc(Address(Z_R1, 16), Address(subkeyH), 16); 2560 2561 // R4+5: data pointer + length 2562 __ z_llgfr(len, blocks); // Cast to 64-bit. 2563 2564 // R0: function code 2565 __ load_const_optimized(Z_R0, (int)VM_Version::MsgDigest::_GHASH); 2566 2567 // Compute. 2568 __ z_sllg(len, len, 4); // In bytes. 
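    // KIMD with the GHASH function code (set in Z_R0 above) digests the data in 16-byte blocks;
    // len was converted from a block count to a byte count above (blocks * 16).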
2569 __ kimd(data); 2570 2571 // Copy back result and free parameter block. 2572 __ z_mvc(Address(state), Address(Z_R1), 16); 2573 __ z_xc(Address(Z_R1), param_block_size, Address(Z_R1)); 2574 __ z_aghi(Z_SP, frame_resize); 2575 2576 __ z_br(Z_R14); 2577 2578 return __ addr_at(start_off); 2579 } 2580 2581 2582 // Call interface for all SHA* stubs. 2583 // 2584 // Z_ARG1 - source data block. Ptr to leftmost byte to be processed. 2585 // Z_ARG2 - current SHA state. Ptr to state area. This area serves as 2586 // parameter block as required by the crypto instruction. 2587 // Z_ARG3 - current byte offset in source data block. 2588 // Z_ARG4 - last byte offset in source data block. 2589 // (Z_ARG4 - Z_ARG3) gives the #bytes remaining to be processed. 2590 // 2591 // Z_RET - return value. First unprocessed byte offset in src buffer. 2592 // 2593 // A few notes on the call interface: 2594 // - All stubs, whether they are single-block or multi-block, are assumed to 2595 // digest an integer multiple of the data block length of data. All data 2596 // blocks are digested using the intermediate message digest (KIMD) instruction. 2597 // Special end processing, as done by the KLMD instruction, seems to be 2598 // emulated by the calling code. 2599 // 2600 // - Z_ARG1 addresses the first byte of source data. The offset (Z_ARG3) is 2601 // already accounted for. 2602 // 2603 // - The current SHA state (the intermediate message digest value) is contained 2604 // in an area addressed by Z_ARG2. The area size depends on the SHA variant 2605 // and is accessible via the enum VM_Version::MsgDigest::_SHA<n>_parmBlk_I 2606 // 2607 // - The single-block stub is expected to digest exactly one data block, starting 2608 // at the address passed in Z_ARG1. 2609 // 2610 // - The multi-block stub is expected to digest all data blocks which start in 2611 // the offset interval [srcOff(Z_ARG3), srcLimit(Z_ARG4)). The exact difference 2612 // (srcLimit-srcOff), rounded up to the next multiple of the data block length, 2613 // gives the number of blocks to digest. It must be assumed that the calling code 2614 // provides for a large enough source data buffer. 2615 // 2616 // Compute SHA-1 function. 2617 address generate_SHA1_stub(bool multiBlock, const char* name) { 2618 __ align(CodeEntryAlignment); 2619 StubCodeMark mark(this, "StubRoutines", name); 2620 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 2621 2622 const Register srcBuff = Z_ARG1; // Points to first block to process (offset already added). 2623 const Register SHAState = Z_ARG2; // Only on entry. Reused soon thereafter for kimd register pairs. 2624 const Register srcOff = Z_ARG3; // int 2625 const Register srcLimit = Z_ARG4; // Only passed in multiBlock case. int 2626 2627 const Register SHAState_local = Z_R1; 2628 const Register SHAState_save = Z_ARG3; 2629 const Register srcBufLen = Z_ARG2; // Destroys state address, must be copied before. 2630 Label useKLMD, rtn; 2631 2632 __ load_const_optimized(Z_R0, (int)VM_Version::MsgDigest::_SHA1); // function code 2633 __ z_lgr(SHAState_local, SHAState); // SHAState == parameter block 2634 2635 if (multiBlock) { // Process everything from offset to limit. 2636 2637 // The following description is valid if we get a raw (unpimped) source data buffer, 2638 // spanning the range between [srcOff(Z_ARG3), srcLimit(Z_ARG4)). As detailed above, 2639 // the calling convention for these stubs is different. 
We leave the description in 2640 // to inform the reader what must be happening hidden in the calling code. 2641 // 2642 // The data block to be processed can have arbitrary length, i.e. its length does not 2643 // need to be an integer multiple of SHA<n>_datablk. Therefore, we need to implement 2644 // two different paths. If the length is an integer multiple, we use KIMD, saving us 2645 // to copy the SHA state back and forth. If the length is odd, we copy the SHA state 2646 // to the stack, execute a KLMD instruction on it and copy the result back to the 2647 // caller's SHA state location. 2648 2649 // Total #srcBuff blocks to process. 2650 if (VM_Version::has_DistinctOpnds()) { 2651 __ z_srk(srcBufLen, srcLimit, srcOff); // exact difference 2652 __ z_ahi(srcBufLen, VM_Version::MsgDigest::_SHA1_dataBlk-1); // round up 2653 __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA1_dataBlk-1)) & 0xffff); 2654 __ z_ark(srcLimit, srcOff, srcBufLen); // Srclimit temporarily holds return value. 2655 __ z_llgfr(srcBufLen, srcBufLen); // Cast to 64-bit. 2656 } else { 2657 __ z_lgfr(srcBufLen, srcLimit); // Exact difference. srcLimit passed as int. 2658 __ z_sgfr(srcBufLen, srcOff); // SrcOff passed as int, now properly casted to long. 2659 __ z_aghi(srcBufLen, VM_Version::MsgDigest::_SHA1_dataBlk-1); // round up 2660 __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA1_dataBlk-1)) & 0xffff); 2661 __ z_lgr(srcLimit, srcOff); // SrcLimit temporarily holds return value. 2662 __ z_agr(srcLimit, srcBufLen); 2663 } 2664 2665 // Integral #blocks to digest? 2666 // As a result of the calculations above, srcBufLen MUST be an integer 2667 // multiple of _SHA1_dataBlk, or else we are in big trouble. 2668 // We insert an asm_assert into the KLMD case to guard against that. 2669 __ z_tmll(srcBufLen, VM_Version::MsgDigest::_SHA1_dataBlk-1); 2670 __ z_brc(Assembler::bcondNotAllZero, useKLMD); 2671 2672 // Process all full blocks. 2673 __ kimd(srcBuff); 2674 2675 __ z_lgr(Z_RET, srcLimit); // Offset of first unprocessed byte in buffer. 2676 } else { // Process one data block only. 2677 __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA1_dataBlk); // #srcBuff bytes to process 2678 __ kimd(srcBuff); 2679 __ add2reg(Z_RET, (int)VM_Version::MsgDigest::_SHA1_dataBlk, srcOff); // Offset of first unprocessed byte in buffer. No 32 to 64 bit extension needed. 2680 } 2681 2682 __ bind(rtn); 2683 __ z_br(Z_R14); 2684 2685 if (multiBlock) { 2686 __ bind(useKLMD); 2687 2688 #if 1 2689 // Security net: this stub is believed to be called for full-sized data blocks only 2690 // NOTE: The following code is believed to be correct, but is is not tested. 2691 __ stop_static("SHA128 stub can digest full data blocks only. Use -XX:-UseSHA as remedy.", 0); 2692 #endif 2693 } 2694 2695 return __ addr_at(start_off); 2696 } 2697 2698 // Compute SHA-256 function. 2699 address generate_SHA256_stub(bool multiBlock, const char* name) { 2700 __ align(CodeEntryAlignment); 2701 StubCodeMark mark(this, "StubRoutines", name); 2702 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 2703 2704 const Register srcBuff = Z_ARG1; 2705 const Register SHAState = Z_ARG2; // Only on entry. Reused soon thereafter. 2706 const Register SHAState_local = Z_R1; 2707 const Register SHAState_save = Z_ARG3; 2708 const Register srcOff = Z_ARG3; 2709 const Register srcLimit = Z_ARG4; 2710 const Register srcBufLen = Z_ARG2; // Destroys state address, must be copied before. 
2698 // Compute SHA-256 function.
2699 address generate_SHA256_stub(bool multiBlock, const char* name) {
2700 __ align(CodeEntryAlignment);
2701 StubCodeMark mark(this, "StubRoutines", name);
2702 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
2703
2704 const Register srcBuff = Z_ARG1;
2705 const Register SHAState = Z_ARG2; // Only on entry. Reused soon thereafter.
2706 const Register SHAState_local = Z_R1;
2707 const Register SHAState_save = Z_ARG3;
2708 const Register srcOff = Z_ARG3;
2709 const Register srcLimit = Z_ARG4;
2710 const Register srcBufLen = Z_ARG2; // Destroys state address, must be copied before.
2711 Label useKLMD, rtn;
2712
2713 __ load_const_optimized(Z_R0, (int)VM_Version::MsgDigest::_SHA256); // function code
2714 __ z_lgr(SHAState_local, SHAState); // SHAState == parameter block
2715
2716 if (multiBlock) { // Process everything from offset to limit.
2717 // The following description is valid if we get a raw (unadjusted) source data buffer,
2718 // spanning the range [srcOff(Z_ARG3), srcLimit(Z_ARG4)). As detailed above,
2719 // the calling convention for these stubs is different. We keep the description here
2720 // to show what must be happening, hidden in the calling code.
2721 //
2722 // The data block to be processed can have arbitrary length, i.e. its length does not
2723 // need to be an integer multiple of SHA<n>_dataBlk. Therefore, two different paths
2724 // are needed. If the length is an integer multiple, we use KIMD, which saves us
2725 // copying the SHA state back and forth. If the length is not a multiple, we copy
2726 // the SHA state to the stack, execute a KLMD instruction on it, and copy the result
2727 // back to the caller's SHA state location.
2728
2729 // Total #srcBuff blocks to process.
2730 if (VM_Version::has_DistinctOpnds()) {
2731 __ z_srk(srcBufLen, srcLimit, srcOff); // exact difference
2732 __ z_ahi(srcBufLen, VM_Version::MsgDigest::_SHA256_dataBlk-1); // round up
2733 __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA256_dataBlk-1)) & 0xffff);
2734 __ z_ark(srcLimit, srcOff, srcBufLen); // SrcLimit temporarily holds return value.
2735 __ z_llgfr(srcBufLen, srcBufLen); // Cast to 64-bit.
2736 } else {
2737 __ z_lgfr(srcBufLen, srcLimit); // exact difference
2738 __ z_sgfr(srcBufLen, srcOff);
2739 __ z_aghi(srcBufLen, VM_Version::MsgDigest::_SHA256_dataBlk-1); // round up
2740 __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA256_dataBlk-1)) & 0xffff);
2741 __ z_lgr(srcLimit, srcOff); // SrcLimit temporarily holds return value.
2742 __ z_agr(srcLimit, srcBufLen);
2743 }
2744
2745 // Integral #blocks to digest?
2746 // As a result of the calculations above, srcBufLen MUST be an integer
2747 // multiple of _SHA256_dataBlk, or else we are in big trouble.
2748 // We insert an asm_assert into the KLMD case to guard against that.
2749 __ z_tmll(srcBufLen, VM_Version::MsgDigest::_SHA256_dataBlk-1);
2750 __ z_brc(Assembler::bcondNotAllZero, useKLMD);
2751
2752 // Process all full blocks.
2753 __ kimd(srcBuff);
2754
2755 __ z_lgr(Z_RET, srcLimit); // Offset of first unprocessed byte in buffer.
2756 } else { // Process one data block only.
2757 __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA256_dataBlk); // #srcBuff bytes to process
2758 __ kimd(srcBuff);
2759 __ add2reg(Z_RET, (int)VM_Version::MsgDigest::_SHA256_dataBlk, srcOff); // Offset of first unprocessed byte in buffer.
2760 }
2761
2762 __ bind(rtn);
2763 __ z_br(Z_R14);
2764
2765 if (multiBlock) {
2766 __ bind(useKLMD);
2767 #if 1
2768 // Security net: this stub is believed to be called for full-sized data blocks only.
2769 // NOTE:
2770 // The following code is believed to be correct, but it is not tested.
2771 __ stop_static("SHA256 stub can digest full data blocks only. Use -XX:-UseSHA as remedy.", 0);
2772 #endif
2773 }
2774
2775 return __ addr_at(start_off);
2776 }
2777
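// Reader note (added explanation, not generated code): the round-up-and-mask sequence
// used by all SHA* stubs relies on the data block length being a power of two. Adding
// (dataBlk - 1) and then clearing the low-order bits rounds the length up to the next
// block boundary. The clearing is done with z_nill, which ANDs only the low-order
// halfword of the register: for 64-byte blocks the immediate is (~63) & 0xffff == 0xffc0,
// for the 128-byte SHA-512 blocks below it is 0xff80. The high-order bits of the length
// are deliberately left untouched.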
2778 // Compute SHA-512 function.
2779 address generate_SHA512_stub(bool multiBlock, const char* name) {
2780 __ align(CodeEntryAlignment);
2781 StubCodeMark mark(this, "StubRoutines", name);
2782 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
2783
2784 const Register srcBuff = Z_ARG1;
2785 const Register SHAState = Z_ARG2; // Only on entry. Reused soon thereafter.
2786 const Register SHAState_local = Z_R1;
2787 const Register SHAState_save = Z_ARG3;
2788 const Register srcOff = Z_ARG3;
2789 const Register srcLimit = Z_ARG4;
2790 const Register srcBufLen = Z_ARG2; // Destroys state address, must be copied before.
2791 Label useKLMD, rtn;
2792
2793 __ load_const_optimized(Z_R0, (int)VM_Version::MsgDigest::_SHA512); // function code
2794 __ z_lgr(SHAState_local, SHAState); // SHAState == parameter block
2795
2796 if (multiBlock) { // Process everything from offset to limit.
2797 // The following description is valid if we get a raw (unadjusted) source data buffer,
2798 // spanning the range [srcOff(Z_ARG3), srcLimit(Z_ARG4)). As detailed above,
2799 // the calling convention for these stubs is different. We keep the description here
2800 // to show what must be happening, hidden in the calling code.
2801 //
2802 // The data block to be processed can have arbitrary length, i.e. its length does not
2803 // need to be an integer multiple of SHA<n>_dataBlk. Therefore, two different paths
2804 // are needed. If the length is an integer multiple, we use KIMD, which saves us
2805 // copying the SHA state back and forth. If the length is not a multiple, we copy
2806 // the SHA state to the stack, execute a KLMD instruction on it, and copy the result
2807 // back to the caller's SHA state location.
2808
2809 // Total #srcBuff blocks to process.
2810 if (VM_Version::has_DistinctOpnds()) {
2811 __ z_srk(srcBufLen, srcLimit, srcOff); // exact difference
2812 __ z_ahi(srcBufLen, VM_Version::MsgDigest::_SHA512_dataBlk-1); // round up
2813 __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA512_dataBlk-1)) & 0xffff);
2814 __ z_ark(srcLimit, srcOff, srcBufLen); // SrcLimit temporarily holds return value.
2815 __ z_llgfr(srcBufLen, srcBufLen); // Cast to 64-bit.
2816 } else {
2817 __ z_lgfr(srcBufLen, srcLimit); // exact difference
2818 __ z_sgfr(srcBufLen, srcOff);
2819 __ z_aghi(srcBufLen, VM_Version::MsgDigest::_SHA512_dataBlk-1); // round up
2820 __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA512_dataBlk-1)) & 0xffff);
2821 __ z_lgr(srcLimit, srcOff); // SrcLimit temporarily holds return value.
2822 __ z_agr(srcLimit, srcBufLen);
2823 }
2824
2825 // Integral #blocks to digest?
2826 // As a result of the calculations above, srcBufLen MUST be an integer
2827 // multiple of _SHA512_dataBlk, or else we are in big trouble.
2828 // We insert an asm_assert into the KLMD case to guard against that.
2829 __ z_tmll(srcBufLen, VM_Version::MsgDigest::_SHA512_dataBlk-1);
2830 __ z_brc(Assembler::bcondNotAllZero, useKLMD);
2831
2832 // Process all full blocks.
2833 __ kimd(srcBuff);
2834
2835 __ z_lgr(Z_RET, srcLimit); // Offset of first unprocessed byte in buffer.
2836 } else { // Process one data block only.
2837 __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA512_dataBlk); // #srcBuff bytes to process
2838 __ kimd(srcBuff);
2839 __ add2reg(Z_RET, (int)VM_Version::MsgDigest::_SHA512_dataBlk, srcOff); // Offset of first unprocessed byte in buffer.
2840 }
2841
2842 __ bind(rtn);
2843 __ z_br(Z_R14);
2844
2845 if (multiBlock) {
2846 __ bind(useKLMD);
2847 #if 1
2848 // Security net: this stub is believed to be called for full-sized data blocks only.
2849 // NOTE:
2850 // The following code is believed to be correct, but it is not tested.
2851 __ stop_static("SHA512 stub can digest full data blocks only. Use -XX:-UseSHA as remedy.", 0);
2852 #endif
2853 }
2854
2855 return __ addr_at(start_off);
2856 }
2857
2858
2859 /**
2860 * Arguments:
2861 *
2862 * Inputs:
2863 * Z_ARG1 - int crc
2864 * Z_ARG2 - byte* buf
2865 * Z_ARG3 - int length (of buffer)
2866 *
2867 * Result:
2868 * Z_RET - int crc result
2869 **/
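// Reader note (added explanation, not generated code): generate_CRC_updateBytes() below
// is shared by the CRC32 and CRC32C stubs. They differ only in the preloaded table
// address and in the invertCRC flag (true for CRC32, false for CRC32C), which presumably
// makes the kernel emitter complement the incoming and outgoing crc value, as
// java.util.zip.CRC32 expects. Both crc and length arrive as Java ints; the length is
// zero-extended before it is used as a 64-bit value.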
2870 // Compute CRC function (generic, for all polynomials).
2871 void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {
2872
2873 // arguments to kernel_crc32:
2874 Register crc = Z_ARG1; // Current checksum, preset by caller or result from previous call, int.
2875 Register data = Z_ARG2; // source byte array
2876 Register dataLen = Z_ARG3; // #bytes to process, int
2877 // Register table = Z_ARG4; // crc table address. Preloaded and passed in by caller.
2878 const Register t0 = Z_R10; // work reg for kernel* emitters
2879 const Register t1 = Z_R11; // work reg for kernel* emitters
2880 const Register t2 = Z_R12; // work reg for kernel* emitters
2881 const Register t3 = Z_R13; // work reg for kernel* emitters
2882
2883
2884 assert_different_registers(crc, data, dataLen, table);
2885
2886 // Crc and dataLen arrive as ints, not as longs as the C calling convention would require.
2887 // Crc is used as int; dataLen is zero-extended to a proper 64-bit length.
2888 __ z_llgfr(dataLen, dataLen);
2889
2890 __ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l spill space for the work registers.
2891 __ z_stmg(Z_R10, Z_R13, 1*8, Z_SP); // Spill regs Z_R10..Z_R13 to make them available as work registers.
2892 __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, invertCRC);
2893 __ z_lmg(Z_R10, Z_R13, 1*8, Z_SP); // Restore regs Z_R10..Z_R13 from the stack.
2894 __ resize_frame(+(6*8), Z_R0, true); // Remove the additional frame space again.
2895
2896 __ z_llgfr(Z_RET, crc); // Updated crc is function result. No copying required, just zero upper 32 bits.
2897 __ z_br(Z_R14); // Result already in Z_RET == Z_ARG1.
2898 }
2899
2900
2901 // Compute CRC32 function.
2902 address generate_CRC32_updateBytes(const char* name) {
2903 __ align(CodeEntryAlignment);
2904 StubCodeMark mark(this, "StubRoutines", name);
2905 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
2906
2907 assert(UseCRC32Intrinsics, "should not generate this stub (%s) with CRC32 intrinsics disabled", name);
2908
2909 BLOCK_COMMENT("CRC32_updateBytes {");
2910 Register table = Z_ARG4; // crc32 table address.
2911 StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
2912
2913 generate_CRC_updateBytes(name, table, true);
2914 BLOCK_COMMENT("} CRC32_updateBytes");
2915
2916 return __ addr_at(start_off);
2917 }
2918
2919
2920 // Compute CRC32C function.
2921 address generate_CRC32C_updateBytes(const char* name) {
2922 __ align(CodeEntryAlignment);
2923 StubCodeMark mark(this, "StubRoutines", name);
2924 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
2925
2926 assert(UseCRC32CIntrinsics, "should not generate this stub (%s) with CRC32C intrinsics disabled", name);
2927
2928 BLOCK_COMMENT("CRC32C_updateBytes {");
2929 Register table = Z_ARG4; // crc32c table address.
2930 StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table); 2931 2932 generate_CRC_updateBytes(name, table, false); 2933 BLOCK_COMMENT("} CRC32C_updateBytes"); 2934 2935 return __ addr_at(start_off); 2936 } 2937 2938 2939 // Arguments: 2940 // Z_ARG1 - x address 2941 // Z_ARG2 - x length 2942 // Z_ARG3 - y address 2943 // Z_ARG4 - y length 2944 // Z_ARG5 - z address 2945 address generate_multiplyToLen() { 2946 __ align(CodeEntryAlignment); 2947 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 2948 2949 address start = __ pc(); 2950 2951 const Register x = Z_ARG1; 2952 const Register xlen = Z_ARG2; 2953 const Register y = Z_ARG3; 2954 const Register ylen = Z_ARG4; 2955 const Register z = Z_ARG5; 2956 2957 // Next registers will be saved on stack in multiply_to_len(). 2958 const Register tmp1 = Z_tmp_1; 2959 const Register tmp2 = Z_tmp_2; 2960 const Register tmp3 = Z_tmp_3; 2961 const Register tmp4 = Z_tmp_4; 2962 const Register tmp5 = Z_R9; 2963 2964 BLOCK_COMMENT("Entry:"); 2965 2966 __ z_llgfr(xlen, xlen); 2967 __ z_llgfr(ylen, ylen); 2968 2969 __ multiply_to_len(x, xlen, y, ylen, z, tmp1, tmp2, tmp3, tmp4, tmp5); 2970 2971 __ z_br(Z_R14); // Return to caller. 2972 2973 return start; 2974 } 2975 2976 address generate_method_entry_barrier() { 2977 __ align(CodeEntryAlignment); 2978 StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier"); 2979 2980 address start = __ pc(); 2981 2982 int nbytes_volatile = (8 + 5) * BytesPerWord; 2983 2984 // VM-Call Prologue 2985 __ save_return_pc(); 2986 __ push_frame_abi160(nbytes_volatile); 2987 __ save_volatile_regs(Z_SP, frame::z_abi_160_size, true, false); 2988 2989 // Prep arg for VM call 2990 // Create ptr to stored return_pc in caller frame. 2991 __ z_la(Z_ARG1, _z_abi(return_pc) + frame::z_abi_160_size + nbytes_volatile, Z_R0, Z_SP); 2992 2993 // VM-Call: BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) 2994 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetNMethod::nmethod_stub_entry_barrier)); 2995 __ z_ltr(Z_R0_scratch, Z_RET); 2996 2997 // VM-Call Epilogue 2998 __ restore_volatile_regs(Z_SP, frame::z_abi_160_size, true, false); 2999 __ pop_frame(); 3000 __ restore_return_pc(); 3001 3002 // Check return val of VM-Call 3003 __ z_bcr(Assembler::bcondZero, Z_R14); 3004 3005 // Pop frame built in prologue. 3006 // Required so wrong_method_stub can deduce caller. 
3007 __ pop_frame(); 3008 __ restore_return_pc(); 3009 3010 // VM-Call indicates deoptimization required 3011 __ load_const_optimized(Z_R1_scratch, SharedRuntime::get_handle_wrong_method_stub()); 3012 __ z_br(Z_R1_scratch); 3013 3014 return start; 3015 } 3016 3017 address generate_cont_thaw(bool return_barrier, bool exception) { 3018 if (!Continuations::enabled()) return nullptr; 3019 Unimplemented(); 3020 return nullptr; 3021 } 3022 3023 address generate_cont_thaw() { 3024 if (!Continuations::enabled()) return nullptr; 3025 Unimplemented(); 3026 return nullptr; 3027 } 3028 3029 address generate_cont_returnBarrier() { 3030 if (!Continuations::enabled()) return nullptr; 3031 Unimplemented(); 3032 return nullptr; 3033 } 3034 3035 address generate_cont_returnBarrier_exception() { 3036 if (!Continuations::enabled()) return nullptr; 3037 Unimplemented(); 3038 return nullptr; 3039 } 3040 3041 // exception handler for upcall stubs 3042 address generate_upcall_stub_exception_handler() { 3043 StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler"); 3044 address start = __ pc(); 3045 3046 // Native caller has no idea how to handle exceptions, 3047 // so we just crash here. Up to callee to catch exceptions. 3048 __ verify_oop(Z_ARG1); 3049 __ load_const_optimized(Z_R1_scratch, CAST_FROM_FN_PTR(uint64_t, UpcallLinker::handle_uncaught_exception)); 3050 __ call_c(Z_R1_scratch); 3051 __ should_not_reach_here(); 3052 3053 return start; 3054 } 3055 3056 // load Method* target of MethodHandle 3057 // Z_ARG1 = jobject receiver 3058 // Z_method = Method* result 3059 address generate_upcall_stub_load_target() { 3060 StubCodeMark mark(this, "StubRoutines", "upcall_stub_load_target"); 3061 address start = __ pc(); 3062 3063 __ resolve_global_jobject(Z_ARG1, Z_tmp_1, Z_tmp_2); 3064 // Load target method from receiver 3065 __ load_heap_oop(Z_method, Address(Z_ARG1, java_lang_invoke_MethodHandle::form_offset()), 3066 noreg, noreg, IS_NOT_NULL); 3067 __ load_heap_oop(Z_method, Address(Z_method, java_lang_invoke_LambdaForm::vmentry_offset()), 3068 noreg, noreg, IS_NOT_NULL); 3069 __ load_heap_oop(Z_method, Address(Z_method, java_lang_invoke_MemberName::method_offset()), 3070 noreg, noreg, IS_NOT_NULL); 3071 __ z_lg(Z_method, Address(Z_method, java_lang_invoke_ResolvedMethodName::vmtarget_offset())); 3072 __ z_stg(Z_method, Address(Z_thread, JavaThread::callee_target_offset())); // just in case callee is deoptimized 3073 3074 __ z_br(Z_R14); 3075 3076 return start; 3077 } 3078 3079 void generate_initial_stubs() { 3080 // Generates all stubs and initializes the entry points. 3081 3082 // Entry points that exist in all platforms. 3083 // Note: This is code that could be shared among different 3084 // platforms - however the benefit seems to be smaller than the 3085 // disadvantage of having a much more complicated generator 3086 // structure. See also comment in stubRoutines.hpp. 3087 StubRoutines::_forward_exception_entry = generate_forward_exception(); 3088 3089 StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address); 3090 StubRoutines::_catch_exception_entry = generate_catch_exception(); 3091 3092 //---------------------------------------------------------------------- 3093 // Entry points that are platform specific. 
3094
3095 if (UseCRC32Intrinsics) {
3096 StubRoutines::_crc_table_adr = (address)StubRoutines::zarch::_crc_table;
3097 StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
3098 }
3099
3100 if (UseCRC32CIntrinsics) {
3101 StubRoutines::_crc32c_table_addr = (address)StubRoutines::zarch::_crc32c_table;
3102 StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
3103 }
3104
3105 // Compact string intrinsics: translate table for the string inflate intrinsic. Used by the trot instruction.
3106 StubRoutines::zarch::_trot_table_addr = (address)StubRoutines::zarch::_trot_table;
3107 }
3108
3109 void generate_continuation_stubs() {
3110 if (!Continuations::enabled()) return;
3111
3112 // Continuation stubs:
3113 StubRoutines::_cont_thaw = generate_cont_thaw();
3114 StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
3115 StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
3116 }
3117
3118 void generate_final_stubs() {
3119 // Generates all stubs and initializes the entry points.
3120
3121 StubRoutines::zarch::_partial_subtype_check = generate_partial_subtype_check();
3122
3123 // Support for verify_oop (must happen after universe_init).
3124 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();
3125
3126 // Arraycopy stubs used by compilers.
3127 generate_arraycopy_stubs();
3128
3129 // nmethod entry barriers for concurrent class unloading
3130 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
3131 if (bs_nm != nullptr) {
3132 StubRoutines::_method_entry_barrier = generate_method_entry_barrier();
3133 }
3134
3135 StubRoutines::_upcall_stub_exception_handler = generate_upcall_stub_exception_handler();
3136 StubRoutines::_upcall_stub_load_target = generate_upcall_stub_load_target();
3137 }
3138
3139 void generate_compiler_stubs() {
3140 #if COMPILER2_OR_JVMCI
3141 // Generate AES intrinsics code.
3142 if (UseAESIntrinsics) {
3143 if (VM_Version::has_Crypto_AES()) {
3144 StubRoutines::_aescrypt_encryptBlock = generate_AES_encryptBlock("AES_encryptBlock");
3145 StubRoutines::_aescrypt_decryptBlock = generate_AES_decryptBlock("AES_decryptBlock");
3146 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_AES_encrypt("AES_encryptBlock_chaining");
3147 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_AES_decrypt("AES_decryptBlock_chaining");
3148 } else {
3149 // In PRODUCT builds, the function pointers will keep their initial (null) value.
3150 // LibraryCallKit::try_to_inline() will then return false, preventing the intrinsic from being called.
3151 assert(VM_Version::has_Crypto_AES(), "Inconsistent settings. Check vm_version_s390.cpp");
3152 }
3153 }
3154
3155 if (UseAESCTRIntrinsics) {
3156 if (VM_Version::has_Crypto_AES_CTR()) {
3157 StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt("counterMode_AESCrypt");
3158 } else {
3159 // In PRODUCT builds, the function pointers will keep their initial (null) value.
3160 // LibraryCallKit::try_to_inline() will then return false, preventing the intrinsic from being called.
3161 assert(VM_Version::has_Crypto_AES_CTR(), "Inconsistent settings. Check vm_version_s390.cpp");
3162 }
3163 }
3164
3165 // Generate GHASH intrinsics code.
3166 if (UseGHASHIntrinsics) {
3167 StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
3168 }
3169
3170 // Generate SHA1/SHA256/SHA512 intrinsics code.
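// Reader note (assumption, mirroring the AES pattern above): the UseSHA*Intrinsics flags
// are expected to have been cleared during VM_Version initialization when the matching
// KIMD function code is unavailable, so no additional hardware check is made here.
// See vm_version_s390.cpp.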
3171 if (UseSHA1Intrinsics) { 3172 StubRoutines::_sha1_implCompress = generate_SHA1_stub(false, "SHA1_singleBlock"); 3173 StubRoutines::_sha1_implCompressMB = generate_SHA1_stub(true, "SHA1_multiBlock"); 3174 } 3175 if (UseSHA256Intrinsics) { 3176 StubRoutines::_sha256_implCompress = generate_SHA256_stub(false, "SHA256_singleBlock"); 3177 StubRoutines::_sha256_implCompressMB = generate_SHA256_stub(true, "SHA256_multiBlock"); 3178 } 3179 if (UseSHA512Intrinsics) { 3180 StubRoutines::_sha512_implCompress = generate_SHA512_stub(false, "SHA512_singleBlock"); 3181 StubRoutines::_sha512_implCompressMB = generate_SHA512_stub(true, "SHA512_multiBlock"); 3182 } 3183 #ifdef COMPILER2 3184 if (UseMultiplyToLenIntrinsic) { 3185 StubRoutines::_multiplyToLen = generate_multiplyToLen(); 3186 } 3187 if (UseMontgomeryMultiplyIntrinsic) { 3188 StubRoutines::_montgomeryMultiply 3189 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply); 3190 } 3191 if (UseMontgomerySquareIntrinsic) { 3192 StubRoutines::_montgomerySquare 3193 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square); 3194 } 3195 if (UseSecondarySupersTable) { 3196 StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub(); 3197 if (!InlineSecondarySupersTest) { 3198 for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { 3199 StubRoutines::_lookup_secondary_supers_table_stubs[slot] = generate_lookup_secondary_supers_table_stub(slot); 3200 } 3201 } 3202 } 3203 #endif 3204 #endif // COMPILER2_OR_JVMCI 3205 } 3206 3207 public: 3208 StubGenerator(CodeBuffer* code, StubsKind kind) : StubCodeGenerator(code) { 3209 switch(kind) { 3210 case Initial_stubs: 3211 generate_initial_stubs(); 3212 break; 3213 case Continuation_stubs: 3214 generate_continuation_stubs(); 3215 break; 3216 case Compiler_stubs: 3217 generate_compiler_stubs(); 3218 break; 3219 case Final_stubs: 3220 generate_final_stubs(); 3221 break; 3222 default: 3223 fatal("unexpected stubs kind: %d", kind); 3224 break; 3225 }; 3226 } 3227 3228 private: 3229 int _stub_count; 3230 void stub_prolog(StubCodeDesc* cdesc) { 3231 #ifdef ASSERT 3232 // Put extra information in the stub code, to make it more readable. 3233 // Write the high part of the address. 3234 // [RGV] Check if there is a dependency on the size of this prolog. 3235 __ emit_data((intptr_t)cdesc >> 32); 3236 __ emit_data((intptr_t)cdesc); 3237 __ emit_data(++_stub_count); 3238 #endif 3239 align(true); 3240 } 3241 3242 void align(bool at_header = false) { 3243 // z/Architecture cache line size is 256 bytes. 3244 // There is no obvious benefit in aligning stub 3245 // code to cache lines. Use CodeEntryAlignment instead. 3246 const unsigned int icache_line_size = CodeEntryAlignment; 3247 const unsigned int icache_half_line_size = MIN2<unsigned int>(32, CodeEntryAlignment); 3248 3249 if (at_header) { 3250 while ((intptr_t)(__ pc()) % icache_line_size != 0) { 3251 __ z_illtrap(); 3252 } 3253 } else { 3254 while ((intptr_t)(__ pc()) % icache_half_line_size != 0) { 3255 __ z_nop(); 3256 } 3257 } 3258 } 3259 3260 }; 3261 3262 void StubGenerator_generate(CodeBuffer* code, StubCodeGenerator::StubsKind kind) { 3263 StubGenerator g(code, kind); 3264 }