/*
 * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_MATCHER_RISCV_HPP
#define CPU_RISCV_MATCHER_RISCV_HPP

  // Defined within class Matcher

  // false => size gets scaled to BytesPerLong, ok.
  static const bool init_array_count_is_in_bytes = false;

  // Whether this platform implements the scalable vector feature
  static const bool implements_scalable_vector = true;

  static const bool supports_scalable_vector() {
    return UseVExt;
  }
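  // "Scalable" means the vector length is not a compile-time constant: the
  // RVV specification leaves the hardware vector register width (VLEN)
  // implementation-defined, so generated vector code must work for any
  // legal VLEN rather than assume a fixed register size.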

  // riscv64 supports misaligned vector store/load.
  static constexpr bool misaligned_vectors_ok() {
    return true;
  }

  // Whether code generation needs accurate ConvI2L types.
  static const bool convi2l_type_required = false;

  // Does the CPU require late expand (see block.cpp for description of late expand)?
  static const bool require_postalloc_expand = false;

  // Do we need to mask the count passed to shift instructions or does
  // the cpu only look at the lower 5/6 bits anyway?
  static const bool need_masked_shift_count = false;
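  // The latter: RV64I shifts read only the low bits of the shift-amount
  // register (rs2[5:0] for sll/srl/sra, rs2[4:0] for the 32-bit
  // sllw/srlw/sraw forms), so no explicit mask instruction is needed.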

  // No support for generic vector operands.
  static const bool supports_generic_vector_operands = false;

  // No support for 48 extra htbl entries in aes-gcm intrinsic
  static const int htbl_entries = 0;

  static constexpr bool isSimpleConstant64(jlong value) {
    // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
    // Probably always true, even if a temp register is required.
    return true;
  }
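  // Even though materializing an arbitrary 64-bit constant on RISC-V can
  // take several instructions (the li pseudo-instruction may expand to a
  // lui/addi/slli sequence), one materialize-and-store is still expected
  // to be no worse than materializing and storing two 32-bit halves.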

  // Use conditional move (CMOVL)
  static constexpr int long_cmove_cost() {
    // long cmoves are no more expensive than int cmoves
    return 0;
  }

  static constexpr int float_cmove_cost() {
    // float cmoves are no more expensive than int cmoves
    return 0;
  }

  // This affects two different things:
  //  - how Decode nodes are matched
  //  - how ImplicitNullCheck opportunities are recognized
  // If true, the matcher will try to remove all Decodes and match them
  // (as operands) into nodes. NullChecks are not prepared to deal with
  // Decodes by final_graph_reshaping().
  // If false, final_graph_reshaping() forces the decode behind the Cmp
  // for a NullCheck. The matcher matches the Decode node into a register.
  // Implicit_null_check optimization moves the Decode along with the
  // memory operation back up before the NullCheck.
  static bool narrow_oop_use_complex_address() {
    return CompressedOops::shift() == 0;
  }
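  // With a zero shift, decoding a narrow oop is at most a base add (or a
  // no-op in zero-based mode), so it is cheap enough to fold into the
  // user's addressing expression as described above.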

  static bool narrow_klass_use_complex_address() {
    return false;
  }

  static bool const_oop_prefer_decode() {
    // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
    return CompressedOops::base() == NULL;
  }

  static bool const_klass_prefer_decode() {
    // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
    return CompressedKlassPointers::base() == NULL;
  }
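  // Rationale: with a NULL base the decode is at most a shift, while a ConP
  // is a full 64-bit pointer constant whose materialization typically costs
  // more instructions than the 32-bit ConN plus the decode.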

  // Is it better to copy float constants, or load them directly from
  // memory?  Intel can load a float constant from a direct address,
  // requiring no extra registers.  Most RISCs will have to materialize
  // an address into a register first, so they would do better to copy
  // the constant from stack.
  static const bool rematerialize_float_constants = false;

  // If CPU can load and store mis-aligned doubles directly then no
  // fixup is needed.  Else we split the double into 2 integer pieces
  // and move it piece-by-piece.  Only happens when passing doubles into
  // C code as the Java calling convention forces doubles to be aligned.
  static const bool misaligned_doubles_ok = true;
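  // The RISC-V spec allows misaligned scalar loads/stores to trap, in which
  // case the execution environment (firmware or kernel) typically emulates
  // them, so they are functionally ok here even if potentially slow.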

  // Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
  static const bool strict_fp_requires_explicit_rounding = false;

  // Are floats converted to double when stored to stack during
  // deoptimization?
  static constexpr bool float_in_double() { return false; }

  // Do ints take an entire long register or just half?
  // The relevant question is how the int is callee-saved:
  // the whole long is written but de-opt'ing will have to extract
  // the relevant 32 bits.
  static const bool int_in_long = true;

  // Does the CPU support vector variable shift instructions?
  static constexpr bool supports_vector_variable_shifts(void) {
    return false;
  }

  // Does the CPU support vector variable rotate instructions?
  static constexpr bool supports_vector_variable_rotates(void) {
    return false;
  }

  // Does the CPU support vector constant rotate instructions?
  static constexpr bool supports_vector_constant_rotates(int shift) {
    return false;
  }

  // Does the CPU support vector unsigned comparison instructions?
  static const bool supports_vector_comparison_unsigned(int vlen, BasicType bt) {
    return false;
  }

  // Some microarchitectures have mask registers used on vectors
  static const bool has_predicated_vectors(void) {
    return false;
  }

  // true means we have fast l2f conversion
  // false means that conversion is done by runtime call
  static constexpr bool convL2FSupported(void) {
    return true;
  }
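  // RV64F provides fcvt.s.l (and fcvt.s.lu), which convert a 64-bit
  // integer to a single-precision float in one instruction, so no runtime
  // call is needed.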

  // Implements a variant of EncodeISOArrayNode that encodes ASCII only
  static const bool supports_encode_ascii_array = false;

#endif // CPU_RISCV_MATCHER_RISCV_HPP