1 /*
  2  * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
  3  * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #ifndef CPU_RISCV_MATCHER_RISCV_HPP
 27 #define CPU_RISCV_MATCHER_RISCV_HPP
 28 
 29   // Defined within class Matcher
 30 
  // ClearArray/init_array nodes receive their count in HeapWords, not bytes;
  // false => size gets scaled to BytesPerLong, ok.
  static const bool init_array_count_is_in_bytes = false;

  // Whether this platform implements the scalable vector feature
  // (vector length is a runtime property, queried via supports_scalable_vector()).
  static const bool implements_scalable_vector = true;
 36 
 37   static const bool supports_scalable_vector() {
 38     return UseRVV;
 39   }
 40 
 41   // riscv supports misaligned vectors store/load.
 42   static constexpr bool misaligned_vectors_ok() {
 43     return true;
 44   }
 45 
  // Whether code generation need accurate ConvI2L types.
  static const bool convi2l_type_required = false;

  // Does the CPU require late expand (see block.cpp for description of late expand)?
  static const bool require_postalloc_expand = false;

  // Do we need to mask the count passed to shift instructions or does
  // the cpu only look at the lower 5/6 bits anyway?
  // RISC-V shift instructions use only the low 5 (32-bit op) / 6 (64-bit op)
  // bits of the count register, so no explicit masking is needed.
  static const bool need_masked_shift_count = false;

  // No support for generic vector operands.
  static const bool supports_generic_vector_operands = false;
 58 
 59   static constexpr bool isSimpleConstant64(jlong value) {
 60     // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
 61     // Probably always true, even if a temp register is required.
 62     return true;
 63   }
 64 
 65   // Use conditional move (CMOVL)
 66   static constexpr int long_cmove_cost() {
 67     // long cmoves are no more expensive than int cmoves
 68     return 0;
 69   }
 70 
 71   static constexpr int float_cmove_cost() {
 72     // float cmoves are no more expensive than int cmoves
 73     return 0;
 74   }
 75 
  // This affects two different things:
  //  - how Decode nodes are matched
  //  - how ImplicitNullCheck opportunities are recognized
  // If true, the matcher will try to remove all Decodes and match them
  // (as operands) into nodes. NullChecks are not prepared to deal with
  // Decodes by final_graph_reshaping().
  // If false, final_graph_reshaping() forces the decode behind the Cmp
  // for a NullCheck. The matcher matches the Decode node into a register.
  // Implicit_null_check optimization moves the Decode along with the
  // memory operation back up before the NullCheck.
  static bool narrow_oop_use_complex_address() {
    // Folding the decode into the addressing mode only works when narrow
    // oops need no shift (i.e. unscaled compressed oops mode).
    return CompressedOops::shift() == 0;
  }
 89 
 90   static bool narrow_klass_use_complex_address() {
 91     return false;
 92   }
 93 
 94   static bool const_oop_prefer_decode() {
 95     // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
 96     return CompressedOops::base() == NULL;
 97   }
 98 
 99   static bool const_klass_prefer_decode() {
100     // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
101     return CompressedKlassPointers::base() == NULL;
102   }
103 
  // Is it better to copy float constants, or load them directly from
  // memory?  Intel can load a float constant from a direct address,
  // requiring no extra registers.  Most RISCs will have to materialize
  // an address into a register first, so they would do better to copy
  // the constant from stack.
  static const bool rematerialize_float_constants = false;

  // If CPU can load and store mis-aligned doubles directly then no
  // fixup is needed.  Else we split the double into 2 integer pieces
  // and move it piece-by-piece.  Only happens when passing doubles into
  // C code as the Java calling convention forces doubles to be aligned.
  // RISC-V can perform mis-aligned double loads/stores directly.
  static const bool misaligned_doubles_ok = true;

  // Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
  static const bool strict_fp_requires_explicit_rounding = false;
119 
120   // Are floats converted to double when stored to stack during
121   // deoptimization?
122   static constexpr bool float_in_double() { return false; }
123 
  // Do ints take an entire long register or just half?
  // The relevant question is how the int is callee-saved:
  // the whole long is written but de-opt'ing will have to extract
  // the relevant 32 bits.
  // On RV64 an int occupies a full 64-bit register.
  static const bool int_in_long = true;
129 
130   // Does the CPU supports vector variable shift instructions?
131   static constexpr bool supports_vector_variable_shifts(void) {
132     return false;
133   }
134 
135   // Does the CPU supports vector variable rotate instructions?
136   static constexpr bool supports_vector_variable_rotates(void) {
137     return false;
138   }
139 
140   // Does the CPU supports vector constant rotate instructions?
141   static constexpr bool supports_vector_constant_rotates(int shift) {
142     return false;
143   }
144 
145   // Does the CPU supports vector unsigned comparison instructions?
146   static const bool supports_vector_comparison_unsigned(int vlen, BasicType bt) {
147     return false;
148   }
149 
150   // Some microarchitectures have mask registers used on vectors
151   static const bool has_predicated_vectors(void) {
152     return false;
153   }
154 
155   // true means we have fast l2f convers
156   // false means that conversion is done by runtime call
157   static constexpr bool convL2FSupported(void) {
158       return true;
159   }
160 
  // Implements a variant of EncodeISOArrayNode that encodes ASCII only;
  // false: this port does not provide the ASCII-only encode intrinsic.
  static const bool supports_encode_ascii_array = false;
163 
164   // Returns pre-selection estimated size of a vector operation.
165   static int vector_op_pre_select_sz_estimate(int vopc, BasicType ety, int vlen) {
166     return 0;
167   }
168 
169 #endif // CPU_RISCV_MATCHER_RISCV_HPP