/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_STUBGENERATOR_X86_64_HPP
#define CPU_X86_STUBGENERATOR_X86_64_HPP

#include "code/codeBlob.hpp"
#include "runtime/continuation.hpp"
#include "runtime/stubCodeGenerator.hpp"

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

  // Call stubs are used to call Java from C.
  address generate_call_stub(address& return_address);
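
  // For orientation, the entry produced here is invoked from the VM through a
  // function pointer of roughly the following shape (a sketch based on
  // StubRoutines::call_stub(); see stubRoutines.hpp for the authoritative
  // declaration):
  //
  //   typedef void (*CallStub)(address   link,               // JavaCallWrapper
  //                            intptr_t* result,             // where the result is stored
  //                            BasicType result_type,
  //                            Method*   method,             // Java method to invoke
  //                            address   entry_point,        // its interpreter/compiled entry
  //                            intptr_t* parameters,         // Java arguments
  //                            int       size_of_parameters,
  //                            TRAPS);                       // current thread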

  // Return point for a Java call if there's an exception thrown in
  // Java code. The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to set up
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception();

  // Continuation point for runtime calls returning with a pending
  // exception. The pending exception check happened in the runtime
  // or native call stub. The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception();

  // Support for intptr_t OrderAccess::fence()
  address generate_orderaccess_fence();

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp();

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr();

  address generate_f2i_fixup();
  address generate_f2l_fixup();
  address generate_d2i_fixup();
  address generate_d2l_fixup();

  address generate_count_leading_zeros_lut(const char *stub_name);
  address generate_popcount_avx_lut(const char *stub_name);
  address generate_iota_indices(const char *stub_name);
  address generate_vector_reverse_bit_lut(const char *stub_name);

  address generate_vector_reverse_byte_perm_mask_long(const char *stub_name);
  address generate_vector_reverse_byte_perm_mask_int(const char *stub_name);
  address generate_vector_reverse_byte_perm_mask_short(const char *stub_name);
  address generate_vector_byte_shuffle_mask(const char *stub_name);

  address generate_fp_mask(const char *stub_name, int64_t mask);

  address generate_vector_mask(const char *stub_name, int64_t mask);

  address generate_vector_byte_perm_mask(const char *stub_name);

  address generate_vector_fp_mask(const char *stub_name, int64_t mask);

  address generate_vector_custom_i32(const char *stub_name, Assembler::AvxVectorLen len,
                                     int32_t val0, int32_t val1, int32_t val2, int32_t val3,
                                     int32_t val4 = 0, int32_t val5 = 0, int32_t val6 = 0, int32_t val7 = 0,
                                     int32_t val8 = 0, int32_t val9 = 0, int32_t val10 = 0, int32_t val11 = 0,
                                     int32_t val12 = 0, int32_t val13 = 0, int32_t val14 = 0, int32_t val15 = 0);

  // Non-destructive plausibility checks for oops
  address generate_verify_oop();

  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  void assert_clean_int(Register Rint, Register Rtmp);

  // Generate overlap test for array copy stubs
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf);

  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != nullptr, "must be generated");
    array_overlap_test(no_overlap_target, nullptr, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(nullptr, &L_no_overlap, sf);
  }


  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  void setup_arg_regs(int nargs = 3);
  void restore_arg_regs();

#ifdef ASSERT
  bool _regs_in_thread;
#endif

  // This is used in places where r10 is a scratch register, and can
  // be adapted if r9 is needed also.
  void setup_arg_regs_using_thread(int nargs = 3);

  void restore_arg_regs_using_thread();

  // Copy big chunks forward
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register tmp1,
                          Register tmp2, Label& L_copy_bytes,
                          Label& L_copy_8_bytes, DecoratorSet decorators,
                          BasicType type);

  // Copy big chunks backward
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register tmp1,
                           Register tmp2, Label& L_copy_bytes,
                           Label& L_copy_8_bytes, DecoratorSet decorators,
                           BasicType type);

  void setup_argument_regs(BasicType type);

  void restore_argument_regs(BasicType type);

#if COMPILER2_OR_JVMCI
  // The following rules apply to AVX3-optimized arraycopy stubs:
  // - If the target supports the AVX3 features (BW+VL+F), the implementation uses
  //   32-byte vectors (YMMs) for both the special cases (various small block sizes)
  //   and the aligned copy loop. This is the default configuration.
  // - If the copy length is above AVX3Threshold, the implementation uses 64-byte
  //   vectors (ZMMs) for the main copy loop (and the subsequent tail), since the
  //   bulk of the cycles will be consumed there.
  // - If the user forces MaxVectorSize=32, REP MOVS is observed to perform better
  //   for disjoint copies above 4096 bytes. For conjoint/backward copies, the
  //   vector-based copy performs better.
  // - If the user sets AVX3Threshold=0, the special cases for small block sizes
  //   also operate over 64-byte vector registers (ZMMs).
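  //
  // A hedged pseudocode sketch of the width selection described above (names
  // and structure are illustrative, not the actual implementation):
  //
  //   int copy_strategy(size_t byte_count, bool disjoint) {  // bytes per vector; 0 = REP MOVS
  //     if (MaxVectorSize == 32) {                           // user capped vectors at YMM width
  //       return (disjoint && byte_count > 4096) ? 0 : 32;
  //     }
  //     if (AVX3Threshold == 0)  return 64;                  // ZMM everywhere, even small blocks
  //     return (byte_count >= AVX3Threshold) ? 64 : 32;      // ZMM main loop only for big copies
  //   }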

  address generate_disjoint_copy_avx3_masked(address* entry, const char *name, int shift,
                                             bool aligned, bool is_oop, bool dest_uninitialized);

  address generate_conjoint_copy_avx3_masked(address* entry, const char *name, int shift,
                                             address nooverlap_target, bool aligned, bool is_oop,
                                             bool dest_uninitialized);

  void arraycopy_avx3_special_cases(XMMRegister xmm, KRegister mask, Register from,
                                    Register to, Register count, int shift,
                                    Register index, Register temp,
                                    bool use64byteVector, Label& L_entry, Label& L_exit);

  void arraycopy_avx3_special_cases_conjoint(XMMRegister xmm, KRegister mask, Register from,
                                             Register to, Register start_index, Register end_index,
                                             Register count, int shift, Register temp,
                                             bool use64byteVector, Label& L_entry, Label& L_exit);

  void copy32_avx(Register dst, Register src, Register index, XMMRegister xmm,
                  int shift = Address::times_1, int offset = 0);

  void copy64_avx(Register dst, Register src, Register index, XMMRegister xmm,
                  bool conjoint, int shift = Address::times_1, int offset = 0,
                  bool use64byteVector = false);

  void copy64_masked_avx(Register dst, Register src, XMMRegister xmm,
                         KRegister mask, Register length, Register index,
                         Register temp, int shift = Address::times_1, int offset = 0,
                         bool use64byteVector = false);

  void copy32_masked_avx(Register dst, Register src, XMMRegister xmm,
                         KRegister mask, Register length, Register index,
                         Register temp, int shift = Address::times_1, int offset = 0);
#endif // COMPILER2_OR_JVMCI

  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name);

  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name);

  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name);

  address generate_fill(BasicType t, bool aligned, const char *name);

  address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
                                       address *entry, const char *name);
  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
                                         const char *name, bool dest_uninitialized = false);
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
                                         address *entry, const char *name,
                                         bool dest_uninitialized = false);
  address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
                                          const char *name, bool dest_uninitialized = false);
  address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
                                          address nooverlap_target, address *entry,
                                          const char *name, bool dest_uninitialized = false);

  // Helper for generating a dynamic type check.
  // Smashes no registers.
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Label& L_success);

  // Generate checkcasting array copy stub
  address generate_checkcast_copy(const char *name, address *entry,
                                  bool dest_uninitialized = false);

  // Generate 'unsafe' array copy stub.
  // Though just as safe as the other stubs, it takes an unscaled
  // size_t argument instead of an element count.
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry, address short_copy_entry,
                               address int_copy_entry, address long_copy_entry);
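
  // A hedged sketch of that dispatch (illustrative C, not the generated assembly):
  //
  //   size_t bits = (size_t)from | (size_t)to | byte_count;
  //   if      ((bits & 7) == 0) goto long_copy;   // everything 8-byte aligned
  //   else if ((bits & 3) == 0) goto int_copy;    // 4-byte aligned
  //   else if ((bits & 1) == 0) goto short_copy;  // 2-byte aligned
  //   else                      goto byte_copy;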

  // Perform range checks on the proposed arraycopy.
  // Kills temp, but nothing else.
  // Also, clean the sign bits of src_pos and dst_pos.
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,
                              Register temp,
                              Label& L_failed);

  // Generate generic array copy stubs
  address generate_generic_copy(const char *name,
                                address byte_copy_entry, address short_copy_entry,
                                address int_copy_entry, address oop_copy_entry,
                                address long_copy_entry, address checkcast_copy_entry);

  address generate_data_cache_writeback();

  address generate_data_cache_writeback_sync();

  void generate_arraycopy_stubs();


  // MD5 stubs

  // ofs and limit are used for multi-block byte arrays.
  // int com.sun.security.provider.MD5.implCompress(byte[] b, int ofs)
  address generate_md5_implCompress(bool multi_block, const char *name);


  // SHA stubs

  // ofs and limit are used for multi-block byte arrays.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha1_implCompress(bool multi_block, const char *name);

  // ofs and limit are used for multi-block byte arrays.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha256_implCompress(bool multi_block, const char *name);
  address generate_sha512_implCompress(bool multi_block, const char *name);
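
  // For orientation, the ofs/limit contract in the multi-block variants matches
  // the Java-level semantics of implCompressMultiBlock (illustrative sketch):
  //
  //   while (ofs <= limit) {    // whole blocks are available in b[ofs..limit]
  //     implCompress(b, ofs);   // digest one block
  //     ofs += blockSize;
  //   }
  //   return ofs;               // new offset, handed back to Java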

  // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
  address generate_pshuffle_byte_flip_mask_sha512();

  address generate_upper_word_mask();
  address generate_shuffle_byte_flip_mask();
  address generate_pshuffle_byte_flip_mask();


  // AES intrinsic stubs

  address generate_aescrypt_encryptBlock();

  address generate_aescrypt_decryptBlock();

  address generate_cipherBlockChaining_encryptAESCrypt();

  // A version of CBC/AES decrypt which does 4 blocks in a loop at a time
  // to hide instruction latency
  address generate_cipherBlockChaining_decryptAESCrypt_Parallel();

  address generate_electronicCodeBook_encryptAESCrypt();

  void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);

  address generate_electronicCodeBook_decryptAESCrypt();

  void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);

  // Vector AES Galois Counter Mode implementation
  address generate_galoisCounterMode_AESCrypt();
  void aesgcm_encrypt(Register in, Register len, Register ct, Register out, Register key,
                      Register state, Register subkeyHtbl, Register avx512_subkeyHtbl, Register counter);


  // Vector AES Counter implementation
  address generate_counterMode_VectorAESCrypt();
  void aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
                      Register len_reg, Register used, Register used_addr, Register saved_encCounter_start);

  // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time
  // to hide instruction latency
  address generate_counterMode_AESCrypt_Parallel();

  address generate_cipherBlockChaining_decryptVectorAESCrypt();

  address generate_key_shuffle_mask();

  void roundDec(XMMRegister xmm_reg);
  void roundDeclast(XMMRegister xmm_reg);
  void roundEnc(XMMRegister key, int rnum);
  void lastroundEnc(XMMRegister key, int rnum);
  void roundDec(XMMRegister key, int rnum);
  void lastroundDec(XMMRegister key, int rnum);
  void gfmul_avx512(XMMRegister ghash, XMMRegister hkey);
  void generateHtbl_48_block_zmm(Register htbl, Register avx512_subkeyHtbl, Register rscratch);
  void ghash16_encrypt16_parallel(Register key, Register subkeyHtbl, XMMRegister ctr_blockx,
                                  XMMRegister aad_hashx, Register in, Register out, Register data, Register pos, bool reduction,
                                  XMMRegister addmask, bool no_ghash_input, Register rounds, Register ghash_pos,
                                  bool final_reduction, int index, XMMRegister counter_inc_mask);

  // Load key and shuffle operation
  void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask);
  void ev_load_key(XMMRegister xmmdst, Register key, int offset, Register rscratch);

  // Utility routine for loading a 128-bit key word in little-endian format;
  // can optionally specify that the shuffle mask is already in an XMM register.
  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask);
  void load_key(XMMRegister xmmdst, Register key, int offset, Register rscratch);

  // Utility routine for incrementing the 128-bit counter (the IV in CTR mode)
  void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block);
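
  // For orientation, CTR mode derives each block's keystream from an
  // incrementing counter; conceptually (scalar sketch, not the SIMD code):
  //
  //   // counter = IV; then for each 16-byte block:
  //   //   keystream = AES_encrypt(counter, key);
  //   //   out[i]    = in[i] ^ keystream;
  //   //   counter  += 1;   // 128-bit increment, which inc_counter implements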

  void generate_aes_stubs();


  // GHASH stubs

  void generate_ghash_stubs();

  void schoolbookAAD(int i, Register subkeyH, XMMRegister data, XMMRegister tmp0,
                     XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3);
  void gfmul(XMMRegister tmp0, XMMRegister t);
  void generateHtbl_one_block(Register htbl, Register rscratch);
  void generateHtbl_eight_blocks(Register htbl);
  void avx_ghash(Register state, Register htbl, Register data, Register blocks);

  // Used by GHASH and AES stubs.
  address ghash_polynomial_addr();
  address ghash_shufflemask_addr();
  address ghash_long_swap_mask_addr();  // byte swap x86 long
  address ghash_byte_swap_mask_addr();  // byte swap x86 byte array

  // Single and multi-block ghash operations
  address generate_ghash_processBlocks();

  // Ghash single and multi block operations using AVX instructions
  address generate_avx_ghash_processBlocks();

  // ChaCha20 stubs and helper functions
  void generate_chacha_stubs();
  address generate_chacha20Block_avx();
  address generate_chacha20Block_avx512();
  void cc20_quarter_round_avx(XMMRegister aVec, XMMRegister bVec,
                              XMMRegister cVec, XMMRegister dVec, XMMRegister scratch,
                              XMMRegister lrot8, XMMRegister lrot16, int vector_len);
  void cc20_shift_lane_org(XMMRegister bVec, XMMRegister cVec,
                           XMMRegister dVec, int vector_len, bool colToDiag);
  void cc20_keystream_collate_avx512(XMMRegister aVec, XMMRegister bVec,
                                     XMMRegister cVec, XMMRegister dVec, Register baseAddr, int baseOffset);

  // Poly1305 multiblock using IFMA instructions
  address generate_poly1305_processBlocks();
  void poly1305_process_blocks_avx512(const Register input, const Register length,
                                      const Register A0, const Register A1, const Register A2,
                                      const Register R0, const Register R1, const Register C1);
  void poly1305_multiply_scalar(const Register a0, const Register a1, const Register a2,
                                const Register r0, const Register r1, const Register c1, bool only128,
                                const Register t0, const Register t1, const Register t2,
                                const Register mulql, const Register mulqh);
  void poly1305_multiply8_avx512(const XMMRegister A0, const XMMRegister A1, const XMMRegister A2,
                                 const XMMRegister R0, const XMMRegister R1, const XMMRegister R2, const XMMRegister R1P, const XMMRegister R2P,
                                 const XMMRegister P0L, const XMMRegister P0H, const XMMRegister P1L, const XMMRegister P1H, const XMMRegister P2L, const XMMRegister P2H,
                                 const XMMRegister TMP, const Register rscratch);
  void poly1305_limbs(const Register limbs, const Register a0, const Register a1, const Register a2, const Register t0, const Register t1);
  void poly1305_limbs_out(const Register a0, const Register a1, const Register a2, const Register limbs, const Register t0, const Register t1);
  void poly1305_limbs_avx512(const XMMRegister D0, const XMMRegister D1,
                             const XMMRegister L0, const XMMRegister L1, const XMMRegister L2, bool padMSG,
                             const XMMRegister TMP, const Register rscratch);

  // BASE64 stubs

  address base64_shuffle_addr();
  address base64_avx2_shuffle_addr();
  address base64_avx2_input_mask_addr();
  address base64_avx2_lut_addr();
  address base64_encoding_table_addr();

  // Code for generating Base64 encoding.
  // Intrinsic function prototype in Base64.java:
  // private void encodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL)
  address generate_base64_encodeBlock();
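
  // A hedged scalar reference for one 3-byte group (the stub vectorizes this
  // using the lookup tables above; illustrative only):
  //
  //   int bits = ((src[sp] & 0xff) << 16) | ((src[sp + 1] & 0xff) << 8) | (src[sp + 2] & 0xff);
  //   dst[dp]     = table[(bits >> 18) & 0x3f];
  //   dst[dp + 1] = table[(bits >> 12) & 0x3f];
  //   dst[dp + 2] = table[(bits >>  6) & 0x3f];
  //   dst[dp + 3] = table[ bits        & 0x3f];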

  // base64 AVX512vbmi tables
  address base64_vbmi_lookup_lo_addr();
  address base64_vbmi_lookup_hi_addr();
  address base64_vbmi_lookup_lo_url_addr();
  address base64_vbmi_lookup_hi_url_addr();
  address base64_vbmi_pack_vec_addr();
  address base64_vbmi_join_0_1_addr();
  address base64_vbmi_join_1_2_addr();
  address base64_vbmi_join_2_3_addr();
  address base64_decoding_table_addr();
  address base64_AVX2_decode_tables_addr();
  address base64_AVX2_decode_LUT_tables_addr();

  // Code for generating Base64 decoding.
  //
  // Based on the article (and associated code) from https://arxiv.org/abs/1910.05109.
  //
  // Intrinsic function prototype in Base64.java:
  // private int decodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL, boolean isMIME)
  address generate_base64_decodeBlock();

  address generate_updateBytesCRC32();
  address generate_updateBytesCRC32C(bool is_pclmulqdq_supported);

  address generate_updateBytesAdler32();

  address generate_multiplyToLen();

  address generate_vectorizedMismatch();

  address generate_squareToLen();

  address generate_method_entry_barrier();

  address generate_mulAdd();

  address generate_bigIntegerRightShift();
  address generate_bigIntegerLeftShift();

  address generate_float16ToFloat();
  address generate_floatToFloat16();

  // Libm trigonometric stubs

  address generate_libmSin();
  address generate_libmCos();
  address generate_libmTan();
  address generate_libmExp();
  address generate_libmPow();
  address generate_libmLog();
  address generate_libmLog10();

  // Shared constants
  static address ZERO;
  static address NEG_ZERO;
  static address ONE;
  static address ONEHALF;
  static address SIGN_MASK;
  static address TWO_POW_55;
  static address TWO_POW_M55;
  static address SHIFTER;
  static address PI32INV;
  static address PI_INV_TABLE;
  static address Ctable;
  static address SC_1;
  static address SC_2;
  static address SC_3;
  static address SC_4;
  static address PI_4;
  static address P_1;
  static address P_3;
  static address P_2;

  void generate_libm_stubs();


  address generate_cont_thaw(const char* label, Continuation::thaw_kind kind);
  address generate_cont_thaw();

  // TODO: will probably need multiple return barriers depending on return type
  address generate_cont_returnBarrier();
  address generate_cont_returnBarrier_exception();

#if INCLUDE_JFR

  // For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
  // It returns a jobject handle to the event writer.
  // The handle is dereferenced and the return value is the event writer oop.
  RuntimeStub* generate_jfr_write_checkpoint();

#endif // INCLUDE_JFR
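
  // A hedged sketch of that contract (illustrative, not the actual runtime glue):
  //
  //   // jobject handle = <runtime checkpoint call>;  // c_rarg0 on entry is ignored
  //   // oop event_writer = *(oop*)handle;            // dereference the handle
  //   // return event_writer;                         // handed back in rax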

  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Since we need to preserve callee-saved values (currently
  // only for C2, but done for C1 as well) we need a callee-saved oop
  // map and therefore have to make these stubs into RuntimeStubs
  // rather than BufferBlobs. If the compiler needs all registers to
  // be preserved between the fault point and the exception handler
  // then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller-saved registers were assumed volatile in the compiler.
  address generate_throw_exception(const char* name,
                                   address runtime_entry,
                                   Register arg1 = noreg,
                                   Register arg2 = noreg);

  // Interpreter or compiled code marshalling registers to/from an inline type instance
  address generate_return_value_stub(address destination, const char* name, bool has_res);

  void create_control_words();

  // Initialization
  void generate_initial_stubs();
  void generate_continuation_stubs();
  void generate_compiler_stubs();
  void generate_final_stubs();

 public:
  StubGenerator(CodeBuffer* code, StubsKind kind);
};

#endif // CPU_X86_STUBGENERATOR_X86_64_HPP