1 /*
2 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "c1/c1_Compilation.hpp"
26 #include "c1/c1_Compiler.hpp"
27 #include "c1/c1_FrameMap.hpp"
28 #include "c1/c1_GraphBuilder.hpp"
29 #include "c1/c1_LinearScan.hpp"
30 #include "c1/c1_MacroAssembler.hpp"
31 #include "c1/c1_Runtime1.hpp"
32 #include "c1/c1_ValueType.hpp"
33 #include "code/aotCodeCache.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/compilerDirectives.hpp"
36 #include "interpreter/linkResolver.hpp"
37 #include "jfr/support/jfrIntrinsics.hpp"
38 #include "memory/allocation.hpp"
39 #include "memory/allocation.inline.hpp"
40 #include "memory/resourceArea.hpp"
41 #include "runtime/interfaceSupport.inline.hpp"
42 #include "runtime/sharedRuntime.hpp"
43 #include "runtime/vm_version.hpp"
44 #include "utilities/bitMap.inline.hpp"
45 #include "utilities/macros.hpp"
46
47
// Registers this instance with the AbstractCompiler framework as the C1 compiler.
Compiler::Compiler() : AbstractCompiler(compiler_c1) {
}
50
// One-time global initialization of the C1 runtime, run by the compiler
// thread that performs startup initialization (see Compiler::initialize()).
// Returns false when Runtime1 stub generation fails, in which case C1 is
// shut down by the caller.
bool Compiler::init_c1_runtime() {
  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
  FrameMap::initialize();
  // Runtime1 generates its stubs into the per-thread buffer blob; a failure
  // here is propagated so the caller can fail C1 initialization.
  if (!Runtime1::initialize(buffer_blob)) {
    return false;
  }
  AOTCodeCache::init_c1_table();
  // initialize data structures
  ValueType::initialize();
  GraphBuilder::initialize();
  // note: to use more than one instance of LinearScan at a time this function call has to
  // be moved somewhere outside of this initialization function:
  Interval::initialize();
  return true;
}
66
67
68 void Compiler::initialize() {
69 // Buffer blob must be allocated per C1 compiler thread at startup
70 BufferBlob* buffer_blob = init_buffer_blob();
71
72 if (should_perform_init()) {
73 if (buffer_blob == nullptr || !init_c1_runtime()) {
74 // When we come here we are in state 'initializing'; entire C1 compilation
75 // can be shut down.
76 set_state(failed);
77 } else {
78 set_state(initialized);
79 }
80 }
81 }
82
83 uint Compiler::code_buffer_size() {
84 return Compilation::desired_max_code_buffer_size + Compilation::desired_max_constant_size;
85 }
86
87 BufferBlob* Compiler::init_buffer_blob() {
88 // Allocate buffer blob once at startup since allocation for each
89 // compilation seems to be too expensive (at least on Intel win32).
90 assert (CompilerThread::current()->get_buffer_blob() == nullptr, "Should initialize only once");
91
92 // Setup CodeBuffer.
93 BufferBlob* buffer_blob = BufferBlob::create("C1 temporary CodeBuffer", code_buffer_size());
94 if (buffer_blob != nullptr) {
95 CompilerThread::current()->set_buffer_blob(buffer_blob);
96 }
97
98 return buffer_blob;
99 }
100
101 bool Compiler::is_intrinsic_supported(const methodHandle& method) {
102 vmIntrinsics::ID id = method->intrinsic_id();
103 assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
104
105 if (method->is_synchronized()) {
106 // C1 does not support intrinsification of synchronized methods.
107 return false;
108 }
109 return Compiler::is_intrinsic_supported(id);
110 }
111
// Returns whether C1 implements an intrinsic for the given vmIntrinsics ID.
// Layout: IDs whose availability depends on platform capabilities (queried
// via VM_Version) or build configuration (#ifdefs) get dedicated cases with
// an early 'return false' when the precondition fails; the long fall-through
// list covers unconditionally supported intrinsics. Any ID not listed here
// is not available in C1.
bool Compiler::is_intrinsic_supported(vmIntrinsics::ID id) {
  switch (id) {
  case vmIntrinsics::_compareAndSetLong:
    break;
  case vmIntrinsics::_getAndAddInt:
    // Needs a 4-byte atomic get-and-add.
    if (!VM_Version::supports_atomic_getadd4()) return false;
    break;
  case vmIntrinsics::_getAndAddLong:
    // Needs an 8-byte atomic get-and-add.
    if (!VM_Version::supports_atomic_getadd8()) return false;
    break;
  case vmIntrinsics::_getAndSetInt:
    if (!VM_Version::supports_atomic_getset4()) return false;
    break;
  case vmIntrinsics::_getAndSetLong:
    if (!VM_Version::supports_atomic_getset8()) return false;
    break;
  case vmIntrinsics::_getAndSetReference:
#ifdef _LP64
    // On 64-bit VMs the required exchange width depends on whether oops are
    // compressed (4 bytes) or uncompressed (8 bytes).
    if (!UseCompressedOops && !VM_Version::supports_atomic_getset8()) return false;
    if (UseCompressedOops && !VM_Version::supports_atomic_getset4()) return false;
#else
    if (!VM_Version::supports_atomic_getset4()) return false;
#endif
    break;
  case vmIntrinsics::_onSpinWait:
    if (!VM_Version::supports_on_spin_wait()) return false;
    break;
  case vmIntrinsics::_floatToFloat16:
  case vmIntrinsics::_float16ToFloat:
    // Both half-precision conversions require float16 support.
    if (!VM_Version::supports_float16()) return false;
    break;
  // ---- Unconditionally supported intrinsics (fall-through list) ----
  case vmIntrinsics::_arraycopy:
  case vmIntrinsics::_currentTimeMillis:
  case vmIntrinsics::_nanoTime:
  case vmIntrinsics::_Reference_get0:
    // Use the intrinsic version of Reference.get() so that the value in
    // the referent field can be registered by the G1 pre-barrier code.
    // Also to prevent commoning reads from this field across safepoint
    // since GC can change its value.
  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_storeStoreFence:
  case vmIntrinsics::_fullFence:
  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_longBitsToDouble:
  case vmIntrinsics::_getClass:
  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_currentCarrierThread:
  case vmIntrinsics::_currentThread:
  case vmIntrinsics::_scopedValueCache:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dsqrt_strict:
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
#if defined(AMD64)
  // These math intrinsics are only implemented on AMD64.
  case vmIntrinsics::_dsinh:
  case vmIntrinsics::_dtanh:
  case vmIntrinsics::_dcbrt:
#endif
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dpow:
  case vmIntrinsics::_fmaD:
  case vmIntrinsics::_fmaF:
  case vmIntrinsics::_getReference:
  case vmIntrinsics::_getBoolean:
  case vmIntrinsics::_getByte:
  case vmIntrinsics::_getShort:
  case vmIntrinsics::_getChar:
  case vmIntrinsics::_getInt:
  case vmIntrinsics::_getLong:
  case vmIntrinsics::_getFloat:
  case vmIntrinsics::_getDouble:
  case vmIntrinsics::_putReference:
  case vmIntrinsics::_putBoolean:
  case vmIntrinsics::_putByte:
  case vmIntrinsics::_putShort:
  case vmIntrinsics::_putChar:
  case vmIntrinsics::_putInt:
  case vmIntrinsics::_putLong:
  case vmIntrinsics::_putFloat:
  case vmIntrinsics::_putDouble:
  case vmIntrinsics::_getReferenceVolatile:
  case vmIntrinsics::_getBooleanVolatile:
  case vmIntrinsics::_getByteVolatile:
  case vmIntrinsics::_getShortVolatile:
  case vmIntrinsics::_getCharVolatile:
  case vmIntrinsics::_getIntVolatile:
  case vmIntrinsics::_getLongVolatile:
  case vmIntrinsics::_getFloatVolatile:
  case vmIntrinsics::_getDoubleVolatile:
  case vmIntrinsics::_putReferenceVolatile:
  case vmIntrinsics::_putBooleanVolatile:
  case vmIntrinsics::_putByteVolatile:
  case vmIntrinsics::_putShortVolatile:
  case vmIntrinsics::_putCharVolatile:
  case vmIntrinsics::_putIntVolatile:
  case vmIntrinsics::_putLongVolatile:
  case vmIntrinsics::_putFloatVolatile:
  case vmIntrinsics::_putDoubleVolatile:
  case vmIntrinsics::_getShortUnaligned:
  case vmIntrinsics::_getCharUnaligned:
  case vmIntrinsics::_getIntUnaligned:
  case vmIntrinsics::_getLongUnaligned:
  case vmIntrinsics::_putShortUnaligned:
  case vmIntrinsics::_putCharUnaligned:
  case vmIntrinsics::_putIntUnaligned:
  case vmIntrinsics::_putLongUnaligned:
  case vmIntrinsics::_Preconditions_checkIndex:
  case vmIntrinsics::_Preconditions_checkLongIndex:
  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
#if defined(S390) || defined(PPC64) || defined(AARCH64) || defined(AMD64)
  // CRC32C is only intrinsified on platforms providing the stubs.
  case vmIntrinsics::_updateBytesCRC32C:
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
#endif
  case vmIntrinsics::_vectorizedMismatch:
  case vmIntrinsics::_compareAndSetInt:
  case vmIntrinsics::_compareAndSetReference:
  case vmIntrinsics::_getCharStringU:
  case vmIntrinsics::_putCharStringU:
#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_counterTime:
#endif
  case vmIntrinsics::_getObjectSize:
#if defined(X86) || defined(AARCH64) || defined(S390) || defined(RISCV64) || defined(PPC64)
  case vmIntrinsics::_clone:
#endif
    break;
  case vmIntrinsics::_blackhole:
    break;
  default:
    return false; // Intrinsics not on the previous list are not available.
  }

  return true;
}
255
256 void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci, bool install_code, DirectiveSet* directive) {
257 CompileTask* task = env->task();
258 if (install_code && task->is_aot_load()) {
259 assert(!task->preload(), "Pre-loading AOT code is not implemeted for C1 code");
260 bool success = AOTCodeCache::load_nmethod(env, method, entry_bci, this, CompLevel(task->comp_level()));
261 if (success) {
262 assert(task->is_success(), "sanity");
263 return;
264 }
265 AOTCodeCache::invalidate(task->aot_code_entry()); // mark aot_code_entry as not entrant
266 if (AOTCodeCache::is_code_load_thread_on()) {
267 // Bail out if AOT code load failed in AOT Code loading thread
268 // when UseAOTCodeLoadThread flag is on.
269 // We want this thread go quickly through AOT code load requests
270 // instead of spending time on normal compilation.
271 // TODO: pass this task to normal compilation thread.
272 env->record_failure("Failed to load AOT code");
273 return;
274 } else {
275 // Do normal compilation
276 task->clear_aot();
277 }
278 }
279 BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
280 assert(buffer_blob != nullptr, "Must exist");
281 // invoke compilation
282 {
283 // We are nested here because we need for the destructor
284 // of Compilation to occur before we release the any
285 // competing compiler thread
286 ResourceMark rm;
287 Compilation c(this, env, method, entry_bci, buffer_blob, install_code, directive);
288 }
289 }
290
291
// Prints C1 compilation timers; the actual reporting is implemented by
// Compilation::print_timers().
void Compiler::print_timers() {
  Compilation::print_timers();
}