1 /*
  2  * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "c1/c1_Compilation.hpp"
 27 #include "c1/c1_Compiler.hpp"
 28 #include "c1/c1_FrameMap.hpp"
 29 #include "c1/c1_GraphBuilder.hpp"
 30 #include "c1/c1_LinearScan.hpp"
 31 #include "c1/c1_MacroAssembler.hpp"
 32 #include "c1/c1_Runtime1.hpp"
 33 #include "c1/c1_ValueType.hpp"
 34 #include "code/SCCache.hpp"
 35 #include "compiler/compileBroker.hpp"
 36 #include "compiler/compilerDirectives.hpp"
 37 #include "interpreter/linkResolver.hpp"
 38 #include "jfr/support/jfrIntrinsics.hpp"
 39 #include "memory/allocation.hpp"
 40 #include "memory/allocation.inline.hpp"
 41 #include "memory/resourceArea.hpp"
 42 #include "runtime/interfaceSupport.inline.hpp"
 43 #include "runtime/sharedRuntime.hpp"
 44 #include "runtime/vm_version.hpp"
 45 #include "utilities/bitMap.inline.hpp"
 46 #include "utilities/macros.hpp"
 47 
 48 
// C1 compiler instance: registers itself with the AbstractCompiler
// framework under the compiler_c1 id.
Compiler::Compiler() : AbstractCompiler(compiler_c1) {
}
 51 
// One-time initialization of the C1 runtime subsystems. Invoked from
// Compiler::initialize() by the single thread that wins should_perform_init().
// The order matters: Runtime1 stub generation needs the buffer blob that was
// attached to this thread by init_buffer_blob().
void Compiler::init_c1_runtime() {
  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
  // Generate the C1 runtime stubs into the pre-allocated buffer blob.
  Runtime1::initialize(buffer_blob);
  // Register C1 runtime entries with the SCCache (startup/shared code cache) table.
  SCCache::init_c1_table();
  FrameMap::initialize();
  // initialize data structures
  ValueType::initialize();
  GraphBuilder::initialize();
  // note: to use more than one instance of LinearScan at a time this function call has to
  //       be moved somewhere outside of this constructor:
  Interval::initialize();
}
 64 
 65 
 66 void Compiler::initialize() {
 67   // Buffer blob must be allocated per C1 compiler thread at startup
 68   BufferBlob* buffer_blob = init_buffer_blob();
 69 
 70   if (should_perform_init()) {
 71     if (buffer_blob == nullptr) {
 72       // When we come here we are in state 'initializing'; entire C1 compilation
 73       // can be shut down.
 74       set_state(failed);
 75     } else {
 76       init_c1_runtime();
 77       set_state(initialized);
 78     }
 79   }
 80 }
 81 
 82 uint Compiler::code_buffer_size() {
 83   return Compilation::desired_max_code_buffer_size() + Compilation::desired_max_constant_size();
 84 }
 85 
 86 BufferBlob* Compiler::init_buffer_blob() {
 87   // Allocate buffer blob once at startup since allocation for each
 88   // compilation seems to be too expensive (at least on Intel win32).
 89   assert (CompilerThread::current()->get_buffer_blob() == nullptr, "Should initialize only once");
 90 
 91   // setup CodeBuffer.  Preallocate a BufferBlob of size
 92   // NMethodSizeLimit plus some extra space for constants.
 93   BufferBlob* buffer_blob = BufferBlob::create("C1 temporary CodeBuffer", code_buffer_size());
 94   if (buffer_blob != nullptr) {
 95     CompilerThread::current()->set_buffer_blob(buffer_blob);
 96   }
 97 
 98   return buffer_blob;
 99 }
100 
101 bool Compiler::is_intrinsic_supported(const methodHandle& method) {
102   vmIntrinsics::ID id = method->intrinsic_id();
103   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
104 
105   if (method->is_synchronized()) {
106     // C1 does not support intrinsification of synchronized methods.
107     return false;
108   }
109   return Compiler::is_intrinsic_supported(id);
110 }
111 
// Returns true iff C1 implements the given intrinsic on the current
// platform/configuration. Any id not listed here hits the default case
// and is rejected. Cases that reach 'break' without returning are supported.
bool Compiler::is_intrinsic_supported(vmIntrinsics::ID id) {
  switch (id) {
  case vmIntrinsics::_compareAndSetLong:
    break;
  // Atomic read-modify-write intrinsics require matching hardware support,
  // queried per operand width (4 or 8 bytes) from VM_Version.
  case vmIntrinsics::_getAndAddInt:
    if (!VM_Version::supports_atomic_getadd4()) return false;
    break;
  case vmIntrinsics::_getAndAddLong:
    if (!VM_Version::supports_atomic_getadd8()) return false;
    break;
  case vmIntrinsics::_getAndSetInt:
    if (!VM_Version::supports_atomic_getset4()) return false;
    break;
  case vmIntrinsics::_getAndSetLong:
    if (!VM_Version::supports_atomic_getset8()) return false;
    break;
  case vmIntrinsics::_getAndSetReference:
#ifdef _LP64
    // On 64-bit VMs a reference slot is 4 or 8 bytes depending on
    // UseCompressedOops, so the required atomic width differs.
    if (!UseCompressedOops && !VM_Version::supports_atomic_getset8()) return false;
    if (UseCompressedOops && !VM_Version::supports_atomic_getset4()) return false;
#else
    if (!VM_Version::supports_atomic_getset4()) return false;
#endif
    break;
  case vmIntrinsics::_onSpinWait:
    if (!VM_Version::supports_on_spin_wait()) return false;
    break;
  case vmIntrinsics::_floatToFloat16:
  case vmIntrinsics::_float16ToFloat:
    if (!VM_Version::supports_float16()) return false;
    break;
  // The remaining intrinsics are supported unconditionally (subject only to
  // the platform #ifdefs below); they all fall through to the shared 'break'.
  case vmIntrinsics::_arraycopy:
  case vmIntrinsics::_currentTimeMillis:
  case vmIntrinsics::_nanoTime:
  case vmIntrinsics::_Reference_get:
    // Use the intrinsic version of Reference.get() so that the value in
    // the referent field can be registered by the G1 pre-barrier code.
    // Also to prevent commoning reads from this field across safepoint
    // since GC can change its value.
  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_storeStoreFence:
  case vmIntrinsics::_fullFence:
  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_longBitsToDouble:
  case vmIntrinsics::_getClass:
  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_getModifiers:
  case vmIntrinsics::_currentCarrierThread:
  case vmIntrinsics::_currentThread:
  case vmIntrinsics::_scopedValueCache:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dsqrt_strict:
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  #if defined(AMD64)
  case vmIntrinsics::_dtanh:
  #endif
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dpow:
  case vmIntrinsics::_fmaD:
  case vmIntrinsics::_fmaF:
  // Unsafe memory-access intrinsics (plain, volatile, and unaligned variants).
  case vmIntrinsics::_getReference:
  case vmIntrinsics::_getBoolean:
  case vmIntrinsics::_getByte:
  case vmIntrinsics::_getShort:
  case vmIntrinsics::_getChar:
  case vmIntrinsics::_getInt:
  case vmIntrinsics::_getLong:
  case vmIntrinsics::_getFloat:
  case vmIntrinsics::_getDouble:
  case vmIntrinsics::_putReference:
  case vmIntrinsics::_putBoolean:
  case vmIntrinsics::_putByte:
  case vmIntrinsics::_putShort:
  case vmIntrinsics::_putChar:
  case vmIntrinsics::_putInt:
  case vmIntrinsics::_putLong:
  case vmIntrinsics::_putFloat:
  case vmIntrinsics::_putDouble:
  case vmIntrinsics::_getReferenceVolatile:
  case vmIntrinsics::_getBooleanVolatile:
  case vmIntrinsics::_getByteVolatile:
  case vmIntrinsics::_getShortVolatile:
  case vmIntrinsics::_getCharVolatile:
  case vmIntrinsics::_getIntVolatile:
  case vmIntrinsics::_getLongVolatile:
  case vmIntrinsics::_getFloatVolatile:
  case vmIntrinsics::_getDoubleVolatile:
  case vmIntrinsics::_putReferenceVolatile:
  case vmIntrinsics::_putBooleanVolatile:
  case vmIntrinsics::_putByteVolatile:
  case vmIntrinsics::_putShortVolatile:
  case vmIntrinsics::_putCharVolatile:
  case vmIntrinsics::_putIntVolatile:
  case vmIntrinsics::_putLongVolatile:
  case vmIntrinsics::_putFloatVolatile:
  case vmIntrinsics::_putDoubleVolatile:
  case vmIntrinsics::_getShortUnaligned:
  case vmIntrinsics::_getCharUnaligned:
  case vmIntrinsics::_getIntUnaligned:
  case vmIntrinsics::_getLongUnaligned:
  case vmIntrinsics::_putShortUnaligned:
  case vmIntrinsics::_putCharUnaligned:
  case vmIntrinsics::_putIntUnaligned:
  case vmIntrinsics::_putLongUnaligned:
  case vmIntrinsics::_Preconditions_checkIndex:
  case vmIntrinsics::_Preconditions_checkLongIndex:
  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
#if defined(S390) || defined(PPC64) || defined(AARCH64)
  case vmIntrinsics::_updateBytesCRC32C:
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
#endif
  case vmIntrinsics::_vectorizedMismatch:
  case vmIntrinsics::_compareAndSetInt:
  case vmIntrinsics::_compareAndSetReference:
  case vmIntrinsics::_getCharStringU:
  case vmIntrinsics::_putCharStringU:
#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_counterTime:
#endif
  case vmIntrinsics::_getObjectSize:
#if defined(X86) || defined(AARCH64) || defined(S390) || defined(RISCV) || defined(PPC64)
  case vmIntrinsics::_clone:
#endif
    break;
  case vmIntrinsics::_blackhole:
    break;
  default:
    return false; // Intrinsics not on the previous list are not available.
  }

  return true;
}
255 
256 void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci, bool install_code, DirectiveSet* directive) {
257   CompileTask* task = env->task();
258   if (install_code && task->is_scc()) {
259     assert(!task->preload(), "Pre-loading cached code is not implemeted for C1 code");
260     bool success = SCCache::load_nmethod(env, method, entry_bci, this, CompLevel(task->comp_level()));
261     if (success) {
262       assert(task->is_success(), "sanity");
263       return;
264     }
265     SCCache::invalidate(task->scc_entry()); // mark scc_entry as not entrant
266     if (SCCache::is_code_load_thread_on() && !StoreCachedCode) {
267       // Bail out if failed to load cached code in SC thread
268       // unless the code is updating.
269       env->record_failure("Failed to load cached code");
270       return;
271     }
272     task->clear_scc();
273   }
274   BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
275   assert(buffer_blob != nullptr, "Must exist");
276   // invoke compilation
277   {
278     // We are nested here because we need for the destructor
279     // of Compilation to occur before we release the any
280     // competing compiler thread
281     ResourceMark rm;
282     Compilation c(this, env, method, entry_bci, buffer_blob, install_code, directive);
283   }
284 }
285 
286 
// Print accumulated C1 compilation phase timers; delegates to Compilation.
void Compiler::print_timers() {
  Compilation::print_timers();
}