
src/hotspot/cpu/x86/stubRoutines_x86.cpp

 65 address StubRoutines::x86::_k256_W_adr = NULL;
 66 address StubRoutines::x86::_k512_W_addr = NULL;
 67 address StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = NULL;
 68 // Base64 masks
 69 address StubRoutines::x86::_encoding_table_base64 = NULL;
 70 address StubRoutines::x86::_shuffle_base64 = NULL;
 71 address StubRoutines::x86::_avx2_shuffle_base64 = NULL;
 72 address StubRoutines::x86::_avx2_input_mask_base64 = NULL;
 73 address StubRoutines::x86::_avx2_lut_base64 = NULL;
 74 address StubRoutines::x86::_lookup_lo_base64 = NULL;
 75 address StubRoutines::x86::_lookup_hi_base64 = NULL;
 76 address StubRoutines::x86::_lookup_lo_base64url = NULL;
 77 address StubRoutines::x86::_lookup_hi_base64url = NULL;
 78 address StubRoutines::x86::_pack_vec_base64 = NULL;
 79 address StubRoutines::x86::_join_0_1_base64 = NULL;
 80 address StubRoutines::x86::_join_1_2_base64 = NULL;
 81 address StubRoutines::x86::_join_2_3_base64 = NULL;
 82 address StubRoutines::x86::_decoding_table_base64 = NULL;
 83 #endif
 84 address StubRoutines::x86::_pshuffle_byte_flip_mask_addr = NULL;

 85 
 86 uint64_t StubRoutines::x86::_crc_by128_masks[] =
 87 {
 88   /* The fields in this structure are arranged so that they can be
 89    * picked up two at a time with 128-bit loads.
 90    *
 91    * Because of the flipped bit order of this CRC polynomial,
 92    * the constant for X**N is left-shifted by 1.  This is because
 93    * a 64 x 64 polynomial multiply produces a 127-bit result
 94    * but the highest term is always aligned to bit 0 in the container.
 95    * Pre-shifting by one fixes this, at the cost of potentially making
 96    * the 32-bit constant no longer fit in a 32-bit container (thus the
 97    * use of uint64_t, though this is also the size used by the carry-
 98    * less multiply instruction).
 99    *
100    * In addition, the flipped bit order and highest-term-at-lowest-bit
101    * multiply changes the constants used.  The 96-bit result will be
102    * aligned to the high-term end of the target 128-bit container,
103    * not the low-term end; that is, instead of a 512-bit or 576-bit fold,
104    * it is a 480 (=512-32) or 544 (=512+64-32) bit fold.

 65 address StubRoutines::x86::_k256_W_adr = NULL;
 66 address StubRoutines::x86::_k512_W_addr = NULL;
 67 address StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = NULL;
 68 // Base64 masks
 69 address StubRoutines::x86::_encoding_table_base64 = NULL;
 70 address StubRoutines::x86::_shuffle_base64 = NULL;
 71 address StubRoutines::x86::_avx2_shuffle_base64 = NULL;
 72 address StubRoutines::x86::_avx2_input_mask_base64 = NULL;
 73 address StubRoutines::x86::_avx2_lut_base64 = NULL;
 74 address StubRoutines::x86::_lookup_lo_base64 = NULL;
 75 address StubRoutines::x86::_lookup_hi_base64 = NULL;
 76 address StubRoutines::x86::_lookup_lo_base64url = NULL;
 77 address StubRoutines::x86::_lookup_hi_base64url = NULL;
 78 address StubRoutines::x86::_pack_vec_base64 = NULL;
 79 address StubRoutines::x86::_join_0_1_base64 = NULL;
 80 address StubRoutines::x86::_join_1_2_base64 = NULL;
 81 address StubRoutines::x86::_join_2_3_base64 = NULL;
 82 address StubRoutines::x86::_decoding_table_base64 = NULL;
 83 #endif
 84 address StubRoutines::x86::_pshuffle_byte_flip_mask_addr = NULL;
 85 address StubRoutines::x86::_check_lock_stack = NULL;
 86 
 87 uint64_t StubRoutines::x86::_crc_by128_masks[] =
 88 {
 89   /* The fields in this structure are arranged so that they can be
 90    * picked up two at a time with 128-bit loads.
 91    *
 92    * Because of the flipped bit order of this CRC polynomial,
 93    * the constant for X**N is left-shifted by 1.  This is because
 94    * a 64 x 64 polynomial multiply produces a 127-bit result
 95    * but the highest term is always aligned to bit 0 in the container.
 96    * Pre-shifting by one fixes this, at the cost of potentially making
 97    * the 32-bit constant no longer fit in a 32-bit container (thus the
 98    * use of uint64_t, though this is also the size used by the carry-
 99    * less multiply instruction).
100    *
101    * In addition, the flipped bit order and highest-term-at-lowest-bit
102    * multiply changes the constants used.  The 96-bit result will be
103    * aligned to the high-term end of the target 128-bit container,
104    * not the low-term end; that is, instead of a 512-bit or 576-bit fold,
105    * it is a 480 (=512-32) or 544 (=512+64-32) bit fold.
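
The comment above describes fold constants of the form x^N mod P(x) that are bit-reflected and then pre-shifted by 1 so that the 127-bit carry-less product lines up in the 128-bit register. The minimal, self-contained C++ sketch below illustrates that construction for the CRC-32 polynomial 0x104C11DB7. The helper names (x_pow_n_mod_p, reflect32) are invented for illustration, and the exponent 480 is simply the 480 (= 512 - 32) bit fold the comment mentions; this is not a claim about which entry of _crc_by128_masks holds that particular value.

#include <cstdint>
#include <cstdio>

// Sketch only: derive a constant of the form "x^n mod P(x), bit-reflected,
// shifted left by 1" for CRC-32 (P = x^32 + 0x04C11DB7).  Helper names are
// invented for illustration.

// x^n mod P(x) over GF(2); bit i of the result holds the coefficient of x^i.
static uint32_t x_pow_n_mod_p(unsigned n) {
  uint32_t r = 1;                        // represents x^0
  for (unsigned i = 0; i < n; i++) {
    uint32_t carry = r & 0x80000000u;    // coefficient of x^31 before shifting
    r <<= 1;                             // multiply by x
    if (carry) r ^= 0x04C11DB7u;         // reduce: x^32 == 0x04C11DB7 (mod P)
  }
  return r;
}

// Reverse the bit order of a 32-bit value (the reflected-CRC domain).
static uint32_t reflect32(uint32_t v) {
  uint32_t r = 0;
  for (int i = 0; i < 32; i++) {
    r = (r << 1) | (v & 1u);
    v >>= 1;
  }
  return r;
}

int main() {
  unsigned n = 480;  // the 480 (= 512 - 32) bit fold mentioned in the comment
  // Pre-shift by 1: the 64 x 64 carry-less product is only 127 bits wide and,
  // in the reflected domain, its highest term lands at bit 0 of the register.
  uint64_t k = ((uint64_t)reflect32(x_pow_n_mod_p(n))) << 1;
  printf("reflected, pre-shifted constant for x^%u: 0x%016llx\n",
         n, (unsigned long long)k);
  return 0;
}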