/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_GraphBuilder.hpp"
#include "c1/c1_LinearScan.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueType.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDirectives.hpp"
#include "interpreter/linkResolver.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"

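// C1, the "client" compiler: fast compilation with light optimization.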
Compiler::Compiler() : AbstractCompiler(compiler_c1) {
}

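// One-time setup of the shared C1 runtime: frame map, runtime stubs,
// value types, graph builder tables and linear-scan interval data.
// Returns false if the runtime stubs cannot be generated (e.g. because
// the code cache is exhausted).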
bool Compiler::init_c1_runtime() {
  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
  FrameMap::initialize();
  if (!Runtime1::initialize(buffer_blob)) {
    return false;
  }
  // initialize data structures
  ValueType::initialize();
  GraphBuilder::initialize();
  // note: to use more than one instance of LinearScan at a time, this call has to
  // be moved somewhere outside of this function:
  Interval::initialize();
  return true;
}


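// Per-thread initialization: every C1 compiler thread gets its own buffer
// blob, and the first thread through here also performs the one-time
// runtime initialization above.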
void Compiler::initialize() {
  // Buffer blob must be allocated per C1 compiler thread at startup
  BufferBlob* buffer_blob = init_buffer_blob();

  if (should_perform_init()) {
    if (buffer_blob == nullptr || !init_c1_runtime()) {
      // When we get here we are in state 'initializing'; the entire C1 compiler
      // can be shut down.
      set_state(failed);
    } else {
      set_state(initialized);
    }
  }
}

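// Size of the per-thread temporary CodeBuffer: space for generated code
// plus the constant table.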
uint Compiler::code_buffer_size() {
  return Compilation::desired_max_code_buffer_size + Compilation::desired_max_constant_size;
}

BufferBlob* Compiler::init_buffer_blob() {
  // Allocate buffer blob once at startup since allocation for each
  // compilation seems to be too expensive (at least on Intel win32).
  assert(CompilerThread::current()->get_buffer_blob() == nullptr, "Should initialize only once");

  // Setup CodeBuffer.
  BufferBlob* buffer_blob = BufferBlob::create("C1 temporary CodeBuffer", code_buffer_size());
  if (buffer_blob != nullptr) {
    CompilerThread::current()->set_buffer_blob(buffer_blob);
  }

  return buffer_blob;
}

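// Intrinsic check for a concrete method: reject synchronized methods up
// front, then defer to the id-based check below.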
bool Compiler::is_intrinsic_supported(const methodHandle& method) {
  vmIntrinsics::ID id = method->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (method->is_synchronized()) {
    // C1 does not support intrinsification of synchronized methods.
    return false;
  }
  return Compiler::is_intrinsic_supported(id);
}

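// Intrinsic check by id alone. Intrinsics listed below are supported,
// some only if the CPU provides the required primitives or the platform
// defines apply; anything not listed is rejected.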
bool Compiler::is_intrinsic_supported(vmIntrinsics::ID id) {
  switch (id) {
  case vmIntrinsics::_compareAndSetLong:
    break;
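  // The atomic get-and-add/get-and-set intrinsics require the matching
  // hardware primitive for the operand size.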
  case vmIntrinsics::_getAndAddInt:
    if (!VM_Version::supports_atomic_getadd4()) return false;
    break;
  case vmIntrinsics::_getAndAddLong:
    if (!VM_Version::supports_atomic_getadd8()) return false;
    break;
  case vmIntrinsics::_getAndSetInt:
    if (!VM_Version::supports_atomic_getset4()) return false;
    break;
  case vmIntrinsics::_getAndSetLong:
    if (!VM_Version::supports_atomic_getset8()) return false;
    break;
  case vmIntrinsics::_getAndSetReference:
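    // With compressed oops a reference swap is a 32-bit exchange,
    // otherwise a 64-bit one.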
#ifdef _LP64
    if (!UseCompressedOops && !VM_Version::supports_atomic_getset8()) return false;
    if (UseCompressedOops && !VM_Version::supports_atomic_getset4()) return false;
#else
    if (!VM_Version::supports_atomic_getset4()) return false;
#endif
    break;
  case vmIntrinsics::_onSpinWait:
    if (!VM_Version::supports_on_spin_wait()) return false;
    break;
  case vmIntrinsics::_floatToFloat16:
  case vmIntrinsics::_float16ToFloat:
    if (!VM_Version::supports_float16()) return false;
    break;
  case vmIntrinsics::_arraycopy:
  case vmIntrinsics::_currentTimeMillis:
  case vmIntrinsics::_nanoTime:
  case vmIntrinsics::_Reference_get0:
    // Use the intrinsic version of Reference.get() so that the value in
    // the referent field can be registered by the G1 pre-barrier code.
    // It also prevents commoning reads from this field across safepoints,
    // since GC can change its value.
  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_storeStoreFence:
  case vmIntrinsics::_fullFence:
  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_longBitsToDouble:
  case vmIntrinsics::_getClass:
  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_currentCarrierThread:
  case vmIntrinsics::_currentThread:
  case vmIntrinsics::_scopedValueCache:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dsqrt_strict:
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
#if defined(AMD64)
  case vmIntrinsics::_dsinh:
  case vmIntrinsics::_dtanh:
  case vmIntrinsics::_dcbrt:
#endif
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dpow:
  case vmIntrinsics::_fmaD:
  case vmIntrinsics::_fmaF:
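  // Unsafe plain, volatile and unaligned memory accessors.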
  case vmIntrinsics::_getReference:
  case vmIntrinsics::_getBoolean:
  case vmIntrinsics::_getByte:
  case vmIntrinsics::_getShort:
  case vmIntrinsics::_getChar:
  case vmIntrinsics::_getInt:
  case vmIntrinsics::_getLong:
  case vmIntrinsics::_getFloat:
  case vmIntrinsics::_getDouble:
  case vmIntrinsics::_putReference:
  case vmIntrinsics::_putBoolean:
  case vmIntrinsics::_putByte:
  case vmIntrinsics::_putShort:
  case vmIntrinsics::_putChar:
  case vmIntrinsics::_putInt:
  case vmIntrinsics::_putLong:
  case vmIntrinsics::_putFloat:
  case vmIntrinsics::_putDouble:
  case vmIntrinsics::_getReferenceVolatile:
  case vmIntrinsics::_getBooleanVolatile:
  case vmIntrinsics::_getByteVolatile:
  case vmIntrinsics::_getShortVolatile:
  case vmIntrinsics::_getCharVolatile:
  case vmIntrinsics::_getIntVolatile:
  case vmIntrinsics::_getLongVolatile:
  case vmIntrinsics::_getFloatVolatile:
  case vmIntrinsics::_getDoubleVolatile:
  case vmIntrinsics::_putReferenceVolatile:
  case vmIntrinsics::_putBooleanVolatile:
  case vmIntrinsics::_putByteVolatile:
  case vmIntrinsics::_putShortVolatile:
  case vmIntrinsics::_putCharVolatile:
  case vmIntrinsics::_putIntVolatile:
  case vmIntrinsics::_putLongVolatile:
  case vmIntrinsics::_putFloatVolatile:
  case vmIntrinsics::_putDoubleVolatile:
  case vmIntrinsics::_getShortUnaligned:
  case vmIntrinsics::_getCharUnaligned:
  case vmIntrinsics::_getIntUnaligned:
  case vmIntrinsics::_getLongUnaligned:
  case vmIntrinsics::_putShortUnaligned:
  case vmIntrinsics::_putCharUnaligned:
  case vmIntrinsics::_putIntUnaligned:
  case vmIntrinsics::_putLongUnaligned:
  case vmIntrinsics::_Preconditions_checkIndex:
  case vmIntrinsics::_Preconditions_checkLongIndex:
  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
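  // C1 implements the CRC32C intrinsics only on these platforms.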
#if defined(S390) || defined(PPC64) || defined(AARCH64) || defined(AMD64)
  case vmIntrinsics::_updateBytesCRC32C:
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
#endif
  case vmIntrinsics::_vectorizedMismatch:
  case vmIntrinsics::_compareAndSetInt:
  case vmIntrinsics::_compareAndSetReference:
  case vmIntrinsics::_getCharStringU:
  case vmIntrinsics::_putCharStringU:
#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_counterTime:
#endif
  case vmIntrinsics::_getObjectSize:
#if defined(X86) || defined(AARCH64) || defined(S390) || defined(RISCV64) || defined(PPC64)
  case vmIntrinsics::_clone:
#endif
    break;
  case vmIntrinsics::_blackhole:
    break;
  default:
    return false; // Intrinsics not listed above are not available.
  }

  return true;
}

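// Compilation entry point invoked by the compile broker for each queued
// C1 compilation request.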
void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci, bool install_code, DirectiveSet* directive) {
  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
  assert(buffer_blob != nullptr, "Must exist");
  // invoke compilation
  {
    // We are nested here because we need the destructor
    // of Compilation to run before we release any
    // competing compiler thread.
    ResourceMark rm;
    Compilation c(this, env, method, entry_bci, buffer_blob, install_code, directive);
  }
}


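// The C1 phase timers live in Compilation, so delegate to it.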
void Compiler::print_timers() {
  Compilation::print_timers();
}