1 /*
2 * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/macroAssembler.hpp"
26 #include "ci/ciSymbols.hpp"
27 #include "ci/ciUtilities.inline.hpp"
28 #include "classfile/vmIntrinsics.hpp"
29 #include "compiler/compileBroker.hpp"
30 #include "compiler/compileLog.hpp"
31 #include "gc/shared/barrierSet.hpp"
32 #include "jfr/support/jfrIntrinsics.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "oops/klass.inline.hpp"
35 #include "oops/objArrayKlass.hpp"
36 #include "opto/addnode.hpp"
37 #include "opto/arraycopynode.hpp"
38 #include "opto/c2compiler.hpp"
39 #include "opto/castnode.hpp"
40 #include "opto/cfgnode.hpp"
41 #include "opto/convertnode.hpp"
42 #include "opto/countbitsnode.hpp"
43 #include "opto/idealKit.hpp"
44 #include "opto/library_call.hpp"
45 #include "opto/mathexactnode.hpp"
46 #include "opto/mulnode.hpp"
47 #include "opto/narrowptrnode.hpp"
48 #include "opto/opaquenode.hpp"
49 #include "opto/parse.hpp"
50 #include "opto/rootnode.hpp"
51 #include "opto/runtime.hpp"
52 #include "opto/subnode.hpp"
53 #include "opto/vectornode.hpp"
54 #include "prims/jvmtiExport.hpp"
55 #include "prims/jvmtiThreadState.hpp"
56 #include "prims/unsafe.hpp"
57 #include "runtime/jniHandles.inline.hpp"
58 #include "runtime/mountUnmountDisabler.hpp"
59 #include "runtime/objectMonitor.hpp"
60 #include "runtime/sharedRuntime.hpp"
61 #include "runtime/stubRoutines.hpp"
62 #include "utilities/macros.hpp"
63 #include "utilities/powerOfTwo.hpp"
64
65 //---------------------------make_vm_intrinsic----------------------------
66 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
67 vmIntrinsicID id = m->intrinsic_id();
68 assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
69
70 if (!m->is_loaded()) {
71 // Do not attempt to inline unloaded methods.
72 return nullptr;
73 }
74
75 C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
76 bool is_available = false;
77
78 {
79 // For calling is_intrinsic_supported and is_intrinsic_disabled_by_flag
80 // the compiler must transition to '_thread_in_vm' state because both
81 // methods access VM-internal data.
82 VM_ENTRY_MARK;
83 methodHandle mh(THREAD, m->get_Method());
84 is_available = compiler != nullptr && compiler->is_intrinsic_available(mh, C->directive());
85 if (is_available && is_virtual) {
86 is_available = vmIntrinsics::does_virtual_dispatch(id);
87 }
88 }
89
90 if (is_available) {
91 assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
92 assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
93 return new LibraryIntrinsic(m, is_virtual,
94 vmIntrinsics::predicates_needed(id),
95 vmIntrinsics::does_virtual_dispatch(id),
96 id);
97 } else {
98 return nullptr;
99 }
100 }
101
102 JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
103 LibraryCallKit kit(jvms, this);
104 Compile* C = kit.C;
105 int nodes = C->unique();
106 #ifndef PRODUCT
107 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
108 char buf[1000];
109 const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
110 tty->print_cr("Intrinsic %s", str);
111 }
112 #endif
113 ciMethod* callee = kit.callee();
114 const int bci = kit.bci();
115 #ifdef ASSERT
116 Node* ctrl = kit.control();
117 #endif
118 // Try to inline the intrinsic.
119 if (callee->check_intrinsic_candidate() &&
120 kit.try_to_inline(_last_predicate)) {
121 const char *inline_msg = is_virtual() ? "(intrinsic, virtual)"
122 : "(intrinsic)";
123 CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, InliningResult::SUCCESS, inline_msg);
124 C->inline_printer()->record(callee, jvms, InliningResult::SUCCESS, inline_msg);
125 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
126 if (C->log()) {
127 C->log()->elem("intrinsic id='%s'%s nodes='%d'",
128 vmIntrinsics::name_at(intrinsic_id()),
129 (is_virtual() ? " virtual='1'" : ""),
130 C->unique() - nodes);
131 }
132 // Push the result from the inlined method onto the stack.
133 kit.push_result();
134 return kit.transfer_exceptions_into_jvms();
135 }
136
137 // The intrinsic bailed out
138 assert(ctrl == kit.control(), "Control flow was added although the intrinsic bailed out");
139 assert(jvms->map() == kit.map(), "Out of sync JVM state");
140 if (jvms->has_method()) {
141 // Not a root compile.
142 const char* msg;
143 if (callee->intrinsic_candidate()) {
144 msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
145 } else {
146 msg = is_virtual() ? "failed to inline (intrinsic, virtual), method not annotated"
147 : "failed to inline (intrinsic), method not annotated";
148 }
149 CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, InliningResult::FAILURE, msg);
150 C->inline_printer()->record(callee, jvms, InliningResult::FAILURE, msg);
151 } else {
152 // Root compile
153 ResourceMark rm;
154 stringStream msg_stream;
155 msg_stream.print("Did not generate intrinsic %s%s at bci:%d in",
156 vmIntrinsics::name_at(intrinsic_id()),
157 is_virtual() ? " (virtual)" : "", bci);
158 const char *msg = msg_stream.freeze();
159 log_debug(jit, inlining)("%s", msg);
160 if (C->print_intrinsics() || C->print_inlining()) {
161 tty->print("%s", msg);
162 }
163 }
164 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
165
166 return nullptr;
167 }
168
169 Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) {
170 LibraryCallKit kit(jvms, this);
171 Compile* C = kit.C;
172 int nodes = C->unique();
173 _last_predicate = predicate;
174 #ifndef PRODUCT
175 assert(is_predicated() && predicate < predicates_count(), "sanity");
176 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
177 char buf[1000];
178 const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
179 tty->print_cr("Predicate for intrinsic %s", str);
180 }
181 #endif
182 ciMethod* callee = kit.callee();
183 const int bci = kit.bci();
184
185 Node* slow_ctl = kit.try_to_predicate(predicate);
186 if (!kit.failing()) {
187 const char *inline_msg = is_virtual() ? "(intrinsic, virtual, predicate)"
188 : "(intrinsic, predicate)";
189 CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, InliningResult::SUCCESS, inline_msg);
190 C->inline_printer()->record(callee, jvms, InliningResult::SUCCESS, inline_msg);
191
192 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
193 if (C->log()) {
194 C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'",
195 vmIntrinsics::name_at(intrinsic_id()),
196 (is_virtual() ? " virtual='1'" : ""),
197 C->unique() - nodes);
198 }
199 return slow_ctl; // Could be null if the check folds.
200 }
201
202 // The intrinsic bailed out
203 if (jvms->has_method()) {
204 // Not a root compile.
205 const char* msg = "failed to generate predicate for intrinsic";
206 CompileTask::print_inlining_ul(kit.callee(), jvms->depth() - 1, bci, InliningResult::FAILURE, msg);
207 C->inline_printer()->record(kit.callee(), jvms, InliningResult::FAILURE, msg);
208 } else {
209 // Root compile
210 ResourceMark rm;
211 stringStream msg_stream;
212 msg_stream.print("Did not generate intrinsic %s%s at bci:%d in",
213 vmIntrinsics::name_at(intrinsic_id()),
214 is_virtual() ? " (virtual)" : "", bci);
215 const char *msg = msg_stream.freeze();
216 log_debug(jit, inlining)("%s", msg);
217 C->inline_printer()->record(kit.callee(), jvms, InliningResult::FAILURE, msg);
218 }
219 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
220 return nullptr;
221 }
222
223 bool LibraryCallKit::try_to_inline(int predicate) {
224 // Handle symbolic names for otherwise undistinguished boolean switches:
225 const bool is_store = true;
226 const bool is_compress = true;
227 const bool is_static = true;
228 const bool is_volatile = true;
229
230 if (!jvms()->has_method()) {
231 // Root JVMState has a null method.
232 assert(map()->memory()->Opcode() == Op_Parm, "");
233 // Insert the memory aliasing node
234 set_all_memory(reset_memory());
235 }
236 assert(merged_memory(), "");
237
238 switch (intrinsic_id()) {
239 case vmIntrinsics::_hashCode: return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
240 case vmIntrinsics::_identityHashCode: return inline_native_hashcode(/*!virtual*/ false, is_static);
241 case vmIntrinsics::_getClass: return inline_native_getClass();
242
243 case vmIntrinsics::_ceil:
244 case vmIntrinsics::_floor:
245 case vmIntrinsics::_rint:
246 case vmIntrinsics::_dsin:
247 case vmIntrinsics::_dcos:
248 case vmIntrinsics::_dtan:
249 case vmIntrinsics::_dsinh:
250 case vmIntrinsics::_dtanh:
251 case vmIntrinsics::_dcbrt:
252 case vmIntrinsics::_dabs:
253 case vmIntrinsics::_fabs:
254 case vmIntrinsics::_iabs:
255 case vmIntrinsics::_labs:
256 case vmIntrinsics::_datan2:
257 case vmIntrinsics::_dsqrt:
258 case vmIntrinsics::_dsqrt_strict:
259 case vmIntrinsics::_dexp:
260 case vmIntrinsics::_dlog:
261 case vmIntrinsics::_dlog10:
262 case vmIntrinsics::_dpow:
263 case vmIntrinsics::_dcopySign:
264 case vmIntrinsics::_fcopySign:
265 case vmIntrinsics::_dsignum:
266 case vmIntrinsics::_roundF:
267 case vmIntrinsics::_roundD:
268 case vmIntrinsics::_fsignum: return inline_math_native(intrinsic_id());
269
270 case vmIntrinsics::_notify:
271 case vmIntrinsics::_notifyAll:
272 return inline_notify(intrinsic_id());
273
274 case vmIntrinsics::_addExactI: return inline_math_addExactI(false /* add */);
275 case vmIntrinsics::_addExactL: return inline_math_addExactL(false /* add */);
276 case vmIntrinsics::_decrementExactI: return inline_math_subtractExactI(true /* decrement */);
277 case vmIntrinsics::_decrementExactL: return inline_math_subtractExactL(true /* decrement */);
278 case vmIntrinsics::_incrementExactI: return inline_math_addExactI(true /* increment */);
279 case vmIntrinsics::_incrementExactL: return inline_math_addExactL(true /* increment */);
280 case vmIntrinsics::_multiplyExactI: return inline_math_multiplyExactI();
281 case vmIntrinsics::_multiplyExactL: return inline_math_multiplyExactL();
282 case vmIntrinsics::_multiplyHigh: return inline_math_multiplyHigh();
283 case vmIntrinsics::_unsignedMultiplyHigh: return inline_math_unsignedMultiplyHigh();
284 case vmIntrinsics::_negateExactI: return inline_math_negateExactI();
285 case vmIntrinsics::_negateExactL: return inline_math_negateExactL();
286 case vmIntrinsics::_subtractExactI: return inline_math_subtractExactI(false /* subtract */);
287 case vmIntrinsics::_subtractExactL: return inline_math_subtractExactL(false /* subtract */);
288
289 case vmIntrinsics::_arraycopy: return inline_arraycopy();
290
291 case vmIntrinsics::_arraySort: return inline_array_sort();
292 case vmIntrinsics::_arrayPartition: return inline_array_partition();
293
294 case vmIntrinsics::_compareToL: return inline_string_compareTo(StrIntrinsicNode::LL);
295 case vmIntrinsics::_compareToU: return inline_string_compareTo(StrIntrinsicNode::UU);
296 case vmIntrinsics::_compareToLU: return inline_string_compareTo(StrIntrinsicNode::LU);
297 case vmIntrinsics::_compareToUL: return inline_string_compareTo(StrIntrinsicNode::UL);
298
299 case vmIntrinsics::_indexOfL: return inline_string_indexOf(StrIntrinsicNode::LL);
300 case vmIntrinsics::_indexOfU: return inline_string_indexOf(StrIntrinsicNode::UU);
301 case vmIntrinsics::_indexOfUL: return inline_string_indexOf(StrIntrinsicNode::UL);
302 case vmIntrinsics::_indexOfIL: return inline_string_indexOfI(StrIntrinsicNode::LL);
303 case vmIntrinsics::_indexOfIU: return inline_string_indexOfI(StrIntrinsicNode::UU);
304 case vmIntrinsics::_indexOfIUL: return inline_string_indexOfI(StrIntrinsicNode::UL);
305 case vmIntrinsics::_indexOfU_char: return inline_string_indexOfChar(StrIntrinsicNode::U);
306 case vmIntrinsics::_indexOfL_char: return inline_string_indexOfChar(StrIntrinsicNode::L);
307
308 case vmIntrinsics::_equalsL: return inline_string_equals(StrIntrinsicNode::LL);
309
310 case vmIntrinsics::_vectorizedHashCode: return inline_vectorizedHashCode();
311
312 case vmIntrinsics::_toBytesStringU: return inline_string_toBytesU();
313 case vmIntrinsics::_getCharsStringU: return inline_string_getCharsU();
314 case vmIntrinsics::_getCharStringU: return inline_string_char_access(!is_store);
315 case vmIntrinsics::_putCharStringU: return inline_string_char_access( is_store);
316
317 case vmIntrinsics::_compressStringC:
318 case vmIntrinsics::_compressStringB: return inline_string_copy( is_compress);
319 case vmIntrinsics::_inflateStringC:
320 case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);
321
322 case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false);
323 case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
324 case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false);
325 case vmIntrinsics::_getShort: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, false);
326 case vmIntrinsics::_getChar: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, false);
327 case vmIntrinsics::_getInt: return inline_unsafe_access(!is_store, T_INT, Relaxed, false);
328 case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false);
329 case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false);
330 case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);
331
332 case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false);
333 case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
334 case vmIntrinsics::_putByte: return inline_unsafe_access( is_store, T_BYTE, Relaxed, false);
335 case vmIntrinsics::_putShort: return inline_unsafe_access( is_store, T_SHORT, Relaxed, false);
336 case vmIntrinsics::_putChar: return inline_unsafe_access( is_store, T_CHAR, Relaxed, false);
337 case vmIntrinsics::_putInt: return inline_unsafe_access( is_store, T_INT, Relaxed, false);
338 case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false);
339 case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false);
340 case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false);
341
342 case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
343 case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
344 case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
345 case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
346 case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_store, T_CHAR, Volatile, false);
347 case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_store, T_INT, Volatile, false);
348 case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_store, T_LONG, Volatile, false);
349 case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_store, T_FLOAT, Volatile, false);
350 case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_store, T_DOUBLE, Volatile, false);
351
352 case vmIntrinsics::_putReferenceVolatile: return inline_unsafe_access( is_store, T_OBJECT, Volatile, false);
353 case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
354 case vmIntrinsics::_putByteVolatile: return inline_unsafe_access( is_store, T_BYTE, Volatile, false);
355 case vmIntrinsics::_putShortVolatile: return inline_unsafe_access( is_store, T_SHORT, Volatile, false);
356 case vmIntrinsics::_putCharVolatile: return inline_unsafe_access( is_store, T_CHAR, Volatile, false);
357 case vmIntrinsics::_putIntVolatile: return inline_unsafe_access( is_store, T_INT, Volatile, false);
358 case vmIntrinsics::_putLongVolatile: return inline_unsafe_access( is_store, T_LONG, Volatile, false);
359 case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access( is_store, T_FLOAT, Volatile, false);
360 case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access( is_store, T_DOUBLE, Volatile, false);
361
362 case vmIntrinsics::_getShortUnaligned: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, true);
363 case vmIntrinsics::_getCharUnaligned: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, true);
364 case vmIntrinsics::_getIntUnaligned: return inline_unsafe_access(!is_store, T_INT, Relaxed, true);
365 case vmIntrinsics::_getLongUnaligned: return inline_unsafe_access(!is_store, T_LONG, Relaxed, true);
366
367 case vmIntrinsics::_putShortUnaligned: return inline_unsafe_access( is_store, T_SHORT, Relaxed, true);
368 case vmIntrinsics::_putCharUnaligned: return inline_unsafe_access( is_store, T_CHAR, Relaxed, true);
369 case vmIntrinsics::_putIntUnaligned: return inline_unsafe_access( is_store, T_INT, Relaxed, true);
370 case vmIntrinsics::_putLongUnaligned: return inline_unsafe_access( is_store, T_LONG, Relaxed, true);
371
372 case vmIntrinsics::_getReferenceAcquire: return inline_unsafe_access(!is_store, T_OBJECT, Acquire, false);
373 case vmIntrinsics::_getBooleanAcquire: return inline_unsafe_access(!is_store, T_BOOLEAN, Acquire, false);
374 case vmIntrinsics::_getByteAcquire: return inline_unsafe_access(!is_store, T_BYTE, Acquire, false);
375 case vmIntrinsics::_getShortAcquire: return inline_unsafe_access(!is_store, T_SHORT, Acquire, false);
376 case vmIntrinsics::_getCharAcquire: return inline_unsafe_access(!is_store, T_CHAR, Acquire, false);
377 case vmIntrinsics::_getIntAcquire: return inline_unsafe_access(!is_store, T_INT, Acquire, false);
378 case vmIntrinsics::_getLongAcquire: return inline_unsafe_access(!is_store, T_LONG, Acquire, false);
379 case vmIntrinsics::_getFloatAcquire: return inline_unsafe_access(!is_store, T_FLOAT, Acquire, false);
380 case vmIntrinsics::_getDoubleAcquire: return inline_unsafe_access(!is_store, T_DOUBLE, Acquire, false);
381
382 case vmIntrinsics::_putReferenceRelease: return inline_unsafe_access( is_store, T_OBJECT, Release, false);
383 case vmIntrinsics::_putBooleanRelease: return inline_unsafe_access( is_store, T_BOOLEAN, Release, false);
384 case vmIntrinsics::_putByteRelease: return inline_unsafe_access( is_store, T_BYTE, Release, false);
385 case vmIntrinsics::_putShortRelease: return inline_unsafe_access( is_store, T_SHORT, Release, false);
386 case vmIntrinsics::_putCharRelease: return inline_unsafe_access( is_store, T_CHAR, Release, false);
387 case vmIntrinsics::_putIntRelease: return inline_unsafe_access( is_store, T_INT, Release, false);
388 case vmIntrinsics::_putLongRelease: return inline_unsafe_access( is_store, T_LONG, Release, false);
389 case vmIntrinsics::_putFloatRelease: return inline_unsafe_access( is_store, T_FLOAT, Release, false);
390 case vmIntrinsics::_putDoubleRelease: return inline_unsafe_access( is_store, T_DOUBLE, Release, false);
391
392 case vmIntrinsics::_getReferenceOpaque: return inline_unsafe_access(!is_store, T_OBJECT, Opaque, false);
393 case vmIntrinsics::_getBooleanOpaque: return inline_unsafe_access(!is_store, T_BOOLEAN, Opaque, false);
394 case vmIntrinsics::_getByteOpaque: return inline_unsafe_access(!is_store, T_BYTE, Opaque, false);
395 case vmIntrinsics::_getShortOpaque: return inline_unsafe_access(!is_store, T_SHORT, Opaque, false);
396 case vmIntrinsics::_getCharOpaque: return inline_unsafe_access(!is_store, T_CHAR, Opaque, false);
397 case vmIntrinsics::_getIntOpaque: return inline_unsafe_access(!is_store, T_INT, Opaque, false);
398 case vmIntrinsics::_getLongOpaque: return inline_unsafe_access(!is_store, T_LONG, Opaque, false);
399 case vmIntrinsics::_getFloatOpaque: return inline_unsafe_access(!is_store, T_FLOAT, Opaque, false);
400 case vmIntrinsics::_getDoubleOpaque: return inline_unsafe_access(!is_store, T_DOUBLE, Opaque, false);
401
402 case vmIntrinsics::_putReferenceOpaque: return inline_unsafe_access( is_store, T_OBJECT, Opaque, false);
403 case vmIntrinsics::_putBooleanOpaque: return inline_unsafe_access( is_store, T_BOOLEAN, Opaque, false);
404 case vmIntrinsics::_putByteOpaque: return inline_unsafe_access( is_store, T_BYTE, Opaque, false);
405 case vmIntrinsics::_putShortOpaque: return inline_unsafe_access( is_store, T_SHORT, Opaque, false);
406 case vmIntrinsics::_putCharOpaque: return inline_unsafe_access( is_store, T_CHAR, Opaque, false);
407 case vmIntrinsics::_putIntOpaque: return inline_unsafe_access( is_store, T_INT, Opaque, false);
408 case vmIntrinsics::_putLongOpaque: return inline_unsafe_access( is_store, T_LONG, Opaque, false);
409 case vmIntrinsics::_putFloatOpaque: return inline_unsafe_access( is_store, T_FLOAT, Opaque, false);
410 case vmIntrinsics::_putDoubleOpaque: return inline_unsafe_access( is_store, T_DOUBLE, Opaque, false);
411
412 case vmIntrinsics::_compareAndSetReference: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap, Volatile);
413 case vmIntrinsics::_compareAndSetByte: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap, Volatile);
414 case vmIntrinsics::_compareAndSetShort: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap, Volatile);
415 case vmIntrinsics::_compareAndSetInt: return inline_unsafe_load_store(T_INT, LS_cmp_swap, Volatile);
416 case vmIntrinsics::_compareAndSetLong: return inline_unsafe_load_store(T_LONG, LS_cmp_swap, Volatile);
417
418 case vmIntrinsics::_weakCompareAndSetReferencePlain: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Relaxed);
419 case vmIntrinsics::_weakCompareAndSetReferenceAcquire: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Acquire);
420 case vmIntrinsics::_weakCompareAndSetReferenceRelease: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Release);
421 case vmIntrinsics::_weakCompareAndSetReference: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Volatile);
422 case vmIntrinsics::_weakCompareAndSetBytePlain: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap_weak, Relaxed);
423 case vmIntrinsics::_weakCompareAndSetByteAcquire: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap_weak, Acquire);
424 case vmIntrinsics::_weakCompareAndSetByteRelease: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap_weak, Release);
425 case vmIntrinsics::_weakCompareAndSetByte: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap_weak, Volatile);
426 case vmIntrinsics::_weakCompareAndSetShortPlain: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap_weak, Relaxed);
427 case vmIntrinsics::_weakCompareAndSetShortAcquire: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap_weak, Acquire);
428 case vmIntrinsics::_weakCompareAndSetShortRelease: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap_weak, Release);
429 case vmIntrinsics::_weakCompareAndSetShort: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap_weak, Volatile);
430 case vmIntrinsics::_weakCompareAndSetIntPlain: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Relaxed);
431 case vmIntrinsics::_weakCompareAndSetIntAcquire: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Acquire);
432 case vmIntrinsics::_weakCompareAndSetIntRelease: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Release);
433 case vmIntrinsics::_weakCompareAndSetInt: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Volatile);
434 case vmIntrinsics::_weakCompareAndSetLongPlain: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Relaxed);
435 case vmIntrinsics::_weakCompareAndSetLongAcquire: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Acquire);
436 case vmIntrinsics::_weakCompareAndSetLongRelease: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Release);
437 case vmIntrinsics::_weakCompareAndSetLong: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Volatile);
438
439 case vmIntrinsics::_compareAndExchangeReference: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Volatile);
440 case vmIntrinsics::_compareAndExchangeReferenceAcquire: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Acquire);
441 case vmIntrinsics::_compareAndExchangeReferenceRelease: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Release);
442 case vmIntrinsics::_compareAndExchangeByte: return inline_unsafe_load_store(T_BYTE, LS_cmp_exchange, Volatile);
443 case vmIntrinsics::_compareAndExchangeByteAcquire: return inline_unsafe_load_store(T_BYTE, LS_cmp_exchange, Acquire);
444 case vmIntrinsics::_compareAndExchangeByteRelease: return inline_unsafe_load_store(T_BYTE, LS_cmp_exchange, Release);
445 case vmIntrinsics::_compareAndExchangeShort: return inline_unsafe_load_store(T_SHORT, LS_cmp_exchange, Volatile);
446 case vmIntrinsics::_compareAndExchangeShortAcquire: return inline_unsafe_load_store(T_SHORT, LS_cmp_exchange, Acquire);
447 case vmIntrinsics::_compareAndExchangeShortRelease: return inline_unsafe_load_store(T_SHORT, LS_cmp_exchange, Release);
448 case vmIntrinsics::_compareAndExchangeInt: return inline_unsafe_load_store(T_INT, LS_cmp_exchange, Volatile);
449 case vmIntrinsics::_compareAndExchangeIntAcquire: return inline_unsafe_load_store(T_INT, LS_cmp_exchange, Acquire);
450 case vmIntrinsics::_compareAndExchangeIntRelease: return inline_unsafe_load_store(T_INT, LS_cmp_exchange, Release);
451 case vmIntrinsics::_compareAndExchangeLong: return inline_unsafe_load_store(T_LONG, LS_cmp_exchange, Volatile);
452 case vmIntrinsics::_compareAndExchangeLongAcquire: return inline_unsafe_load_store(T_LONG, LS_cmp_exchange, Acquire);
453 case vmIntrinsics::_compareAndExchangeLongRelease: return inline_unsafe_load_store(T_LONG, LS_cmp_exchange, Release);
454
455 case vmIntrinsics::_getAndAddByte: return inline_unsafe_load_store(T_BYTE, LS_get_add, Volatile);
456 case vmIntrinsics::_getAndAddShort: return inline_unsafe_load_store(T_SHORT, LS_get_add, Volatile);
457 case vmIntrinsics::_getAndAddInt: return inline_unsafe_load_store(T_INT, LS_get_add, Volatile);
458 case vmIntrinsics::_getAndAddLong: return inline_unsafe_load_store(T_LONG, LS_get_add, Volatile);
459
460 case vmIntrinsics::_getAndSetByte: return inline_unsafe_load_store(T_BYTE, LS_get_set, Volatile);
461 case vmIntrinsics::_getAndSetShort: return inline_unsafe_load_store(T_SHORT, LS_get_set, Volatile);
462 case vmIntrinsics::_getAndSetInt: return inline_unsafe_load_store(T_INT, LS_get_set, Volatile);
463 case vmIntrinsics::_getAndSetLong: return inline_unsafe_load_store(T_LONG, LS_get_set, Volatile);
464 case vmIntrinsics::_getAndSetReference: return inline_unsafe_load_store(T_OBJECT, LS_get_set, Volatile);
465
466 case vmIntrinsics::_loadFence:
467 case vmIntrinsics::_storeFence:
468 case vmIntrinsics::_storeStoreFence:
469 case vmIntrinsics::_fullFence: return inline_unsafe_fence(intrinsic_id());
470
471 case vmIntrinsics::_onSpinWait: return inline_onspinwait();
472
473 case vmIntrinsics::_currentCarrierThread: return inline_native_currentCarrierThread();
474 case vmIntrinsics::_currentThread: return inline_native_currentThread();
475 case vmIntrinsics::_setCurrentThread: return inline_native_setCurrentThread();
476
477 case vmIntrinsics::_scopedValueCache: return inline_native_scopedValueCache();
478 case vmIntrinsics::_setScopedValueCache: return inline_native_setScopedValueCache();
479
480 case vmIntrinsics::_Continuation_pin: return inline_native_Continuation_pinning(false);
481 case vmIntrinsics::_Continuation_unpin: return inline_native_Continuation_pinning(true);
482
483 case vmIntrinsics::_vthreadEndFirstTransition: return inline_native_vthread_end_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_end_first_transition_Java()),
484 "endFirstTransition", true);
485 case vmIntrinsics::_vthreadStartFinalTransition: return inline_native_vthread_start_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_start_final_transition_Java()),
486 "startFinalTransition", true);
487 case vmIntrinsics::_vthreadStartTransition: return inline_native_vthread_start_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_start_transition_Java()),
488 "startTransition", false);
489 case vmIntrinsics::_vthreadEndTransition: return inline_native_vthread_end_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_end_transition_Java()),
490 "endTransition", false);
491 #if INCLUDE_JVMTI
492 case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
493 #endif
494
495 #ifdef JFR_HAVE_INTRINSICS
496 case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
497 case vmIntrinsics::_getEventWriter: return inline_native_getEventWriter();
498 case vmIntrinsics::_jvm_commit: return inline_native_jvm_commit();
499 #endif
500 case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
501 case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
502 case vmIntrinsics::_writeback0: return inline_unsafe_writeback0();
503 case vmIntrinsics::_writebackPreSync0: return inline_unsafe_writebackSync0(true);
504 case vmIntrinsics::_writebackPostSync0: return inline_unsafe_writebackSync0(false);
505 case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
506 case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
507 case vmIntrinsics::_setMemory: return inline_unsafe_setMemory();
508 case vmIntrinsics::_getLength: return inline_native_getLength();
509 case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
510 case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
511 case vmIntrinsics::_equalsB: return inline_array_equals(StrIntrinsicNode::LL);
512 case vmIntrinsics::_equalsC: return inline_array_equals(StrIntrinsicNode::UU);
513 case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
514 case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
515 case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());
516
517 case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
518 case vmIntrinsics::_newArray: return inline_unsafe_newArray(false);
519
520 case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();
521
522 case vmIntrinsics::_isInstance:
523 case vmIntrinsics::_isHidden:
524 case vmIntrinsics::_getSuperclass: return inline_native_Class_query(intrinsic_id());
525
526 case vmIntrinsics::_floatToRawIntBits:
527 case vmIntrinsics::_floatToIntBits:
528 case vmIntrinsics::_intBitsToFloat:
529 case vmIntrinsics::_doubleToRawLongBits:
530 case vmIntrinsics::_doubleToLongBits:
531 case vmIntrinsics::_longBitsToDouble:
532 case vmIntrinsics::_floatToFloat16:
533 case vmIntrinsics::_float16ToFloat: return inline_fp_conversions(intrinsic_id());
534 case vmIntrinsics::_sqrt_float16: return inline_fp16_operations(intrinsic_id(), 1);
535 case vmIntrinsics::_fma_float16: return inline_fp16_operations(intrinsic_id(), 3);
536 case vmIntrinsics::_floatIsFinite:
537 case vmIntrinsics::_floatIsInfinite:
538 case vmIntrinsics::_doubleIsFinite:
539 case vmIntrinsics::_doubleIsInfinite: return inline_fp_range_check(intrinsic_id());
540
541 case vmIntrinsics::_numberOfLeadingZeros_i:
542 case vmIntrinsics::_numberOfLeadingZeros_l:
543 case vmIntrinsics::_numberOfTrailingZeros_i:
544 case vmIntrinsics::_numberOfTrailingZeros_l:
545 case vmIntrinsics::_bitCount_i:
546 case vmIntrinsics::_bitCount_l:
547 case vmIntrinsics::_reverse_i:
548 case vmIntrinsics::_reverse_l:
549 case vmIntrinsics::_reverseBytes_i:
550 case vmIntrinsics::_reverseBytes_l:
551 case vmIntrinsics::_reverseBytes_s:
552 case vmIntrinsics::_reverseBytes_c: return inline_number_methods(intrinsic_id());
553
554 case vmIntrinsics::_compress_i:
555 case vmIntrinsics::_compress_l:
556 case vmIntrinsics::_expand_i:
557 case vmIntrinsics::_expand_l: return inline_bitshuffle_methods(intrinsic_id());
558
559 case vmIntrinsics::_compareUnsigned_i:
560 case vmIntrinsics::_compareUnsigned_l: return inline_compare_unsigned(intrinsic_id());
561
562 case vmIntrinsics::_divideUnsigned_i:
563 case vmIntrinsics::_divideUnsigned_l:
564 case vmIntrinsics::_remainderUnsigned_i:
565 case vmIntrinsics::_remainderUnsigned_l: return inline_divmod_methods(intrinsic_id());
566
567 case vmIntrinsics::_getCallerClass: return inline_native_Reflection_getCallerClass();
568
569 case vmIntrinsics::_Reference_get0: return inline_reference_get0();
570 case vmIntrinsics::_Reference_refersTo0: return inline_reference_refersTo0(false);
571 case vmIntrinsics::_Reference_reachabilityFence: return inline_reference_reachabilityFence();
572 case vmIntrinsics::_PhantomReference_refersTo0: return inline_reference_refersTo0(true);
573 case vmIntrinsics::_Reference_clear0: return inline_reference_clear0(false);
574 case vmIntrinsics::_PhantomReference_clear0: return inline_reference_clear0(true);
575
576 case vmIntrinsics::_Class_cast: return inline_Class_cast();
577
578 case vmIntrinsics::_aescrypt_encryptBlock:
579 case vmIntrinsics::_aescrypt_decryptBlock: return inline_aescrypt_Block(intrinsic_id());
580
581 case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
582 case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
583 return inline_cipherBlockChaining_AESCrypt(intrinsic_id());
584
585 case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
586 case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
587 return inline_electronicCodeBook_AESCrypt(intrinsic_id());
588
589 case vmIntrinsics::_counterMode_AESCrypt:
590 return inline_counterMode_AESCrypt(intrinsic_id());
591
592 case vmIntrinsics::_galoisCounterMode_AESCrypt:
593 return inline_galoisCounterMode_AESCrypt();
594
595 case vmIntrinsics::_md5_implCompress:
596 case vmIntrinsics::_sha_implCompress:
597 case vmIntrinsics::_sha2_implCompress:
598 case vmIntrinsics::_sha5_implCompress:
599 case vmIntrinsics::_sha3_implCompress:
600 return inline_digestBase_implCompress(intrinsic_id());
601 case vmIntrinsics::_double_keccak:
602 return inline_double_keccak();
603
604 case vmIntrinsics::_digestBase_implCompressMB:
605 return inline_digestBase_implCompressMB(predicate);
606
607 case vmIntrinsics::_multiplyToLen:
608 return inline_multiplyToLen();
609
610 case vmIntrinsics::_squareToLen:
611 return inline_squareToLen();
612
613 case vmIntrinsics::_mulAdd:
614 return inline_mulAdd();
615
616 case vmIntrinsics::_montgomeryMultiply:
617 return inline_montgomeryMultiply();
618 case vmIntrinsics::_montgomerySquare:
619 return inline_montgomerySquare();
620
621 case vmIntrinsics::_bigIntegerRightShiftWorker:
622 return inline_bigIntegerShift(true);
623 case vmIntrinsics::_bigIntegerLeftShiftWorker:
624 return inline_bigIntegerShift(false);
625
626 case vmIntrinsics::_vectorizedMismatch:
627 return inline_vectorizedMismatch();
628
629 case vmIntrinsics::_ghash_processBlocks:
630 return inline_ghash_processBlocks();
631 case vmIntrinsics::_chacha20Block:
632 return inline_chacha20Block();
633 case vmIntrinsics::_kyberNtt:
634 return inline_kyberNtt();
635 case vmIntrinsics::_kyberInverseNtt:
636 return inline_kyberInverseNtt();
637 case vmIntrinsics::_kyberNttMult:
638 return inline_kyberNttMult();
639 case vmIntrinsics::_kyberAddPoly_2:
640 return inline_kyberAddPoly_2();
641 case vmIntrinsics::_kyberAddPoly_3:
642 return inline_kyberAddPoly_3();
643 case vmIntrinsics::_kyber12To16:
644 return inline_kyber12To16();
645 case vmIntrinsics::_kyberBarrettReduce:
646 return inline_kyberBarrettReduce();
647 case vmIntrinsics::_dilithiumAlmostNtt:
648 return inline_dilithiumAlmostNtt();
649 case vmIntrinsics::_dilithiumAlmostInverseNtt:
650 return inline_dilithiumAlmostInverseNtt();
651 case vmIntrinsics::_dilithiumNttMult:
652 return inline_dilithiumNttMult();
653 case vmIntrinsics::_dilithiumMontMulByConstant:
654 return inline_dilithiumMontMulByConstant();
655 case vmIntrinsics::_dilithiumDecomposePoly:
656 return inline_dilithiumDecomposePoly();
657 case vmIntrinsics::_base64_encodeBlock:
658 return inline_base64_encodeBlock();
659 case vmIntrinsics::_base64_decodeBlock:
660 return inline_base64_decodeBlock();
661 case vmIntrinsics::_poly1305_processBlocks:
662 return inline_poly1305_processBlocks();
663 case vmIntrinsics::_intpoly_montgomeryMult_P256:
664 return inline_intpoly_montgomeryMult_P256();
665 case vmIntrinsics::_intpoly_assign:
666 return inline_intpoly_assign();
667 case vmIntrinsics::_encodeISOArray:
668 case vmIntrinsics::_encodeByteISOArray:
669 return inline_encodeISOArray(false);
670 case vmIntrinsics::_encodeAsciiArray:
671 return inline_encodeISOArray(true);
672
673 case vmIntrinsics::_updateCRC32:
674 return inline_updateCRC32();
675 case vmIntrinsics::_updateBytesCRC32:
676 return inline_updateBytesCRC32();
677 case vmIntrinsics::_updateByteBufferCRC32:
678 return inline_updateByteBufferCRC32();
679
680 case vmIntrinsics::_updateBytesCRC32C:
681 return inline_updateBytesCRC32C();
682 case vmIntrinsics::_updateDirectByteBufferCRC32C:
683 return inline_updateDirectByteBufferCRC32C();
684
685 case vmIntrinsics::_updateBytesAdler32:
686 return inline_updateBytesAdler32();
687 case vmIntrinsics::_updateByteBufferAdler32:
688 return inline_updateByteBufferAdler32();
689
690 case vmIntrinsics::_profileBoolean:
691 return inline_profileBoolean();
692 case vmIntrinsics::_isCompileConstant:
693 return inline_isCompileConstant();
694
695 case vmIntrinsics::_countPositives:
696 return inline_countPositives();
697
698 case vmIntrinsics::_fmaD:
699 case vmIntrinsics::_fmaF:
700 return inline_fma(intrinsic_id());
701
702 case vmIntrinsics::_isDigit:
703 case vmIntrinsics::_isLowerCase:
704 case vmIntrinsics::_isUpperCase:
705 case vmIntrinsics::_isWhitespace:
706 return inline_character_compare(intrinsic_id());
707
708 case vmIntrinsics::_min:
709 case vmIntrinsics::_max:
710 case vmIntrinsics::_min_strict:
711 case vmIntrinsics::_max_strict:
712 case vmIntrinsics::_minL:
713 case vmIntrinsics::_maxL:
714 case vmIntrinsics::_minF:
715 case vmIntrinsics::_maxF:
716 case vmIntrinsics::_minD:
717 case vmIntrinsics::_maxD:
718 case vmIntrinsics::_minF_strict:
719 case vmIntrinsics::_maxF_strict:
720 case vmIntrinsics::_minD_strict:
721 case vmIntrinsics::_maxD_strict:
722 return inline_min_max(intrinsic_id());
723
724 case vmIntrinsics::_VectorUnaryOp:
725 return inline_vector_nary_operation(1);
726 case vmIntrinsics::_VectorBinaryOp:
727 return inline_vector_nary_operation(2);
728 case vmIntrinsics::_VectorUnaryLibOp:
729 return inline_vector_call(1);
730 case vmIntrinsics::_VectorBinaryLibOp:
731 return inline_vector_call(2);
732 case vmIntrinsics::_VectorTernaryOp:
733 return inline_vector_nary_operation(3);
734 case vmIntrinsics::_VectorFromBitsCoerced:
735 return inline_vector_frombits_coerced();
736 case vmIntrinsics::_VectorMaskOp:
737 return inline_vector_mask_operation();
738 case vmIntrinsics::_VectorLoadOp:
739 return inline_vector_mem_operation(/*is_store=*/false);
740 case vmIntrinsics::_VectorLoadMaskedOp:
741 return inline_vector_mem_masked_operation(/*is_store*/false);
742 case vmIntrinsics::_VectorStoreOp:
743 return inline_vector_mem_operation(/*is_store=*/true);
744 case vmIntrinsics::_VectorStoreMaskedOp:
745 return inline_vector_mem_masked_operation(/*is_store=*/true);
746 case vmIntrinsics::_VectorGatherOp:
747 return inline_vector_gather_scatter(/*is_scatter*/ false);
748 case vmIntrinsics::_VectorScatterOp:
749 return inline_vector_gather_scatter(/*is_scatter*/ true);
750 case vmIntrinsics::_VectorReductionCoerced:
751 return inline_vector_reduction();
752 case vmIntrinsics::_VectorTest:
753 return inline_vector_test();
754 case vmIntrinsics::_VectorBlend:
755 return inline_vector_blend();
756 case vmIntrinsics::_VectorRearrange:
757 return inline_vector_rearrange();
758 case vmIntrinsics::_VectorSelectFrom:
759 return inline_vector_select_from();
760 case vmIntrinsics::_VectorCompare:
761 return inline_vector_compare();
762 case vmIntrinsics::_VectorBroadcastInt:
763 return inline_vector_broadcast_int();
764 case vmIntrinsics::_VectorConvert:
765 return inline_vector_convert();
766 case vmIntrinsics::_VectorInsert:
767 return inline_vector_insert();
768 case vmIntrinsics::_VectorExtract:
769 return inline_vector_extract();
770 case vmIntrinsics::_VectorCompressExpand:
771 return inline_vector_compress_expand();
772 case vmIntrinsics::_VectorSelectFromTwoVectorOp:
773 return inline_vector_select_from_two_vectors();
774 case vmIntrinsics::_IndexVector:
775 return inline_index_vector();
776 case vmIntrinsics::_IndexPartiallyInUpperRange:
777 return inline_index_partially_in_upper_range();
778
779 case vmIntrinsics::_getObjectSize:
780 return inline_getObjectSize();
781
782 case vmIntrinsics::_blackhole:
783 return inline_blackhole();
784
785 default:
786 // If you get here, it may be that someone has added a new intrinsic
787 // to the list in vmIntrinsics.hpp without implementing it here.
788 #ifndef PRODUCT
789 if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
790 tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
791 vmIntrinsics::name_at(intrinsic_id()), vmIntrinsics::as_int(intrinsic_id()));
792 }
793 #endif
794 return false;
795 }
796 }
797
798 Node* LibraryCallKit::try_to_predicate(int predicate) {
799 if (!jvms()->has_method()) {
800 // Root JVMState has a null method.
801 assert(map()->memory()->Opcode() == Op_Parm, "");
802 // Insert the memory aliasing node
803 set_all_memory(reset_memory());
804 }
805 assert(merged_memory(), "");
806
807 switch (intrinsic_id()) {
808 case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
809 return inline_cipherBlockChaining_AESCrypt_predicate(false);
810 case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
811 return inline_cipherBlockChaining_AESCrypt_predicate(true);
812 case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
813 return inline_electronicCodeBook_AESCrypt_predicate(false);
814 case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
815 return inline_electronicCodeBook_AESCrypt_predicate(true);
816 case vmIntrinsics::_counterMode_AESCrypt:
817 return inline_counterMode_AESCrypt_predicate();
818 case vmIntrinsics::_digestBase_implCompressMB:
819 return inline_digestBase_implCompressMB_predicate(predicate);
820 case vmIntrinsics::_galoisCounterMode_AESCrypt:
821 return inline_galoisCounterMode_AESCrypt_predicate();
822
823 default:
824 // If you get here, it may be that someone has added a new intrinsic
825 // to the list in vmIntrinsics.hpp without implementing it here.
826 #ifndef PRODUCT
827 if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
828 tty->print_cr("*** Warning: Unimplemented predicate for intrinsic %s(%d)",
829 vmIntrinsics::name_at(intrinsic_id()), vmIntrinsics::as_int(intrinsic_id()));
830 }
831 #endif
832 Node* slow_ctl = control();
833 set_control(top()); // No fast path intrinsic
834 return slow_ctl;
835 }
836 }
837
838 //------------------------------set_result-------------------------------
839 // Helper function for finishing intrinsics.
840 void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) {
841 record_for_igvn(region);
842 set_control(_gvn.transform(region));
843 set_result( _gvn.transform(value));
844 assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity");
845 }
846
847 RegionNode* LibraryCallKit::create_bailout() {
848 RegionNode* bailout = new RegionNode(1);
849 record_for_igvn(bailout);
850 return bailout;
851 }
852
853 bool LibraryCallKit::check_bailout(RegionNode* bailout) {
854 if (bailout->req() > 1) {
855 bailout = _gvn.transform(bailout)->as_Region();
856 Node* frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr));
857 Node* halt = _gvn.transform(new HaltNode(bailout, frame, "unexpected guard failure in intrinsic"));
858 C->root()->add_req(halt);
859 }
860 return stopped();
861 }
862
863 //------------------------------generate_guard---------------------------
864 // Helper function for generating guarded fast-slow graph structures.
865 // The given 'test', if true, guards a slow path. If the test fails
866 // then a fast path can be taken. (We generally hope it fails.)
867 // In all cases, GraphKit::control() is updated to the fast path.
868 // The returned value represents the control for the slow path.
869 // The return value is never 'top'; it is either a valid control
870 // or null if it is obvious that the slow path can never be taken.
871 // Also, if region and the slow control are not null, the slow edge
872 // is appended to the region.
873 Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
874 if (stopped()) {
875 // Already short circuited.
876 return nullptr;
877 }
878
879 // Build an if node and its projections.
880 // If test is true we take the slow path, which we assume is uncommon.
881 if (_gvn.type(test) == TypeInt::ZERO) {
882 // The slow branch is never taken. No need to build this guard.
883 return nullptr;
884 }
885
886 IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);
887
888 Node* if_slow = _gvn.transform(new IfTrueNode(iff));
889 if (if_slow == top()) {
890 // The slow branch is never taken. No need to build this guard.
891 return nullptr;
892 }
893
894 if (region != nullptr)
895 region->add_req(if_slow);
896
897 Node* if_fast = _gvn.transform(new IfFalseNode(iff));
898 set_control(if_fast);
899
900 return if_slow;
901 }
902
903 inline Node* LibraryCallKit::generate_slow_guard(Node* test, RegionNode* region) {
904 return generate_guard(test, region, PROB_UNLIKELY_MAG(3));
905 }
906 inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) {
907 return generate_guard(test, region, PROB_FAIR);
908 }
909
910 inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
911 Node** pos_index, bool with_opaque) {
912 if (stopped())
913 return nullptr; // already stopped
914 if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
915 return nullptr; // index is already adequately typed
916 Node* cmp_lt = _gvn.transform(new CmpINode(index, intcon(0)));
917 Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
918 if (with_opaque) {
919 bol_lt = _gvn.transform(new OpaqueConstantBoolNode(C, bol_lt, false));
920 }
921 Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
922 if (is_neg != nullptr && pos_index != nullptr) {
923 // Emulate effect of Parse::adjust_map_after_if.
924 Node* ccast = new CastIINode(control(), index, TypeInt::POS);
925 (*pos_index) = _gvn.transform(ccast);
926 }
927 return is_neg;
928 }
929
930 // Make sure that 'position' is a valid limit index, in [0..length].
931 // There are two equivalent plans for checking this:
932 // A. (offset + copyLength) unsigned<= arrayLength
933 // B. offset <= (arrayLength - copyLength)
934 // We require that all of the values above, except for the sum and
935 // difference, are already known to be non-negative.
936 // Plan A is robust in the face of overflow, if offset and copyLength
937 // are both hugely positive.
938 //
939 // Plan B is less direct and intuitive, but it does not overflow at
940 // all, since the difference of two non-negatives is always
941 // representable. Whenever Java methods must perform the equivalent
942 // check they generally use Plan B instead of Plan A.
943 // For the moment we use Plan A.
944 inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
945 Node* subseq_length,
946 Node* array_length,
947 RegionNode* region,
948 bool with_opaque) {
949 if (stopped())
950 return nullptr; // already stopped
951 bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
952 if (zero_offset && subseq_length->eqv_uncast(array_length))
953 return nullptr; // common case of whole-array copy
954 Node* last = subseq_length;
955 if (!zero_offset) // last += offset
956 last = _gvn.transform(new AddINode(last, offset));
957 Node* cmp_lt = _gvn.transform(new CmpUNode(array_length, last));
958 Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
959 if (with_opaque) {
960 bol_lt = _gvn.transform(new OpaqueConstantBoolNode(C, bol_lt, false));
961 }
962 Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
963 return is_over;
964 }
965
966 // Emit range checks for the given String.value byte array
967 void LibraryCallKit::generate_string_range_check(Node* array,
968 Node* offset,
969 Node* count,
970 bool char_count,
971 RegionNode* region) {
972 if (stopped()) {
973 return; // already stopped
974 }
975 if (char_count) {
976 // Convert char count to byte count
977 count = _gvn.transform(new LShiftINode(count, intcon(1)));
978 }
979 // Offset and count must not be negative
980 generate_negative_guard(offset, region, nullptr, true);
981 generate_negative_guard(count, region, nullptr, true);
982 // Offset + count must not exceed length of array
983 generate_limit_guard(offset, count, load_array_length(array), region, true);
984 }
985
986 Node* LibraryCallKit::current_thread_helper(Node*& tls_output, ByteSize handle_offset,
987 bool is_immutable) {
988 ciKlass* thread_klass = env()->Thread_klass();
989 const Type* thread_type
990 = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
991
992 Node* thread = _gvn.transform(new ThreadLocalNode());
993 Node* p = off_heap_plus_addr(thread, in_bytes(handle_offset));
994 tls_output = thread;
995
996 Node* thread_obj_handle
997 = (is_immutable
998 ? LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
999 TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered)
1000 : make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered));
1001 thread_obj_handle = _gvn.transform(thread_obj_handle);
1002
1003 DecoratorSet decorators = IN_NATIVE;
1004 if (is_immutable) {
1005 decorators |= C2_IMMUTABLE_MEMORY;
1006 }
1007 return access_load(thread_obj_handle, thread_type, T_OBJECT, decorators);
1008 }
1009
1010 //--------------------------generate_current_thread--------------------
1011 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
1012 return current_thread_helper(tls_output, JavaThread::threadObj_offset(),
1013 /*is_immutable*/false);
1014 }
1015
1016 //--------------------------generate_virtual_thread--------------------
1017 Node* LibraryCallKit::generate_virtual_thread(Node* tls_output) {
1018 return current_thread_helper(tls_output, JavaThread::vthread_offset(),
1019 !C->method()->changes_current_thread());
1020 }
1021
1022 //------------------------------make_string_method_node------------------------
1023 // Helper method for String intrinsic functions. This version is called with
1024 // str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
1025 // characters (depending on 'is_byte'). cnt1 and cnt2 are pointing to Int nodes
1026 // containing the lengths of str1 and str2.
1027 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
1028 Node* result = nullptr;
1029 switch (opcode) {
1030 case Op_StrIndexOf:
1031 result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES),
1032 str1_start, cnt1, str2_start, cnt2, ae);
1033 break;
1034 case Op_StrComp:
1035 result = new StrCompNode(control(), memory(TypeAryPtr::BYTES),
1036 str1_start, cnt1, str2_start, cnt2, ae);
1037 break;
1038 case Op_StrEquals:
1039 // We already know that cnt1 == cnt2 here (checked in 'inline_string_equals').
1040 // Use the constant length if there is one because optimized match rule may exist.
1041 result = new StrEqualsNode(control(), memory(TypeAryPtr::BYTES),
1042 str1_start, str2_start, cnt2->is_Con() ? cnt2 : cnt1, ae);
1043 break;
1044 default:
1045 ShouldNotReachHere();
1046 return nullptr;
1047 }
1048
1049 // All these intrinsics have checks.
1050 C->set_has_split_ifs(true); // Has chance for split-if optimization
1051 clear_upper_avx();
1052
1053 return _gvn.transform(result);
1054 }
1055
1056 //------------------------------inline_string_compareTo------------------------
1057 bool LibraryCallKit::inline_string_compareTo(StrIntrinsicNode::ArgEnc ae) {
1058 Node* arg1 = argument(0);
1059 Node* arg2 = argument(1);
1060
1061 arg1 = must_be_not_null(arg1, true);
1062 arg2 = must_be_not_null(arg2, true);
1063
1064 // Get start addr and length of first argument
1065 Node* arg1_start = array_element_address(arg1, intcon(0), T_BYTE);
1066 Node* arg1_cnt = load_array_length(arg1);
1067
1068 // Get start addr and length of second argument
1069 Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
1070 Node* arg2_cnt = load_array_length(arg2);
1071
1072 Node* result = make_string_method_node(Op_StrComp, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
1073 set_result(result);
1074 return true;
1075 }
1076
1077 //------------------------------inline_string_equals------------------------
1078 bool LibraryCallKit::inline_string_equals(StrIntrinsicNode::ArgEnc ae) {
1079 Node* arg1 = argument(0);
1080 Node* arg2 = argument(1);
1081
1082 // paths (plus control) merge
1083 RegionNode* region = new RegionNode(3);
1084 Node* phi = new PhiNode(region, TypeInt::BOOL);
1085
1086 if (!stopped()) {
1087
1088 arg1 = must_be_not_null(arg1, true);
1089 arg2 = must_be_not_null(arg2, true);
1090
1091 // Get start addr and length of first argument
1092 Node* arg1_start = array_element_address(arg1, intcon(0), T_BYTE);
1093 Node* arg1_cnt = load_array_length(arg1);
1094
1095 // Get start addr and length of second argument
1096 Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
1097 Node* arg2_cnt = load_array_length(arg2);
1098
1099 // Check for arg1_cnt != arg2_cnt
1100 Node* cmp = _gvn.transform(new CmpINode(arg1_cnt, arg2_cnt));
1101 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
1102 Node* if_ne = generate_slow_guard(bol, nullptr);
1103 if (if_ne != nullptr) {
1104 phi->init_req(2, intcon(0));
1105 region->init_req(2, if_ne);
1106 }
1107
1108 // Check for count == 0 is done by assembler code for StrEquals.
1109
1110 if (!stopped()) {
1111 Node* equals = make_string_method_node(Op_StrEquals, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
1112 phi->init_req(1, equals);
1113 region->init_req(1, control());
1114 }
1115 }
1116
1117 // post merge
1118 set_control(_gvn.transform(region));
1119 record_for_igvn(region);
1120
1121 set_result(_gvn.transform(phi));
1122 return true;
1123 }
1124
1125 //------------------------------inline_array_equals----------------------------
1126 bool LibraryCallKit::inline_array_equals(StrIntrinsicNode::ArgEnc ae) {
1127 assert(ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::LL, "unsupported array types");
1128 Node* arg1 = argument(0);
1129 Node* arg2 = argument(1);
1130
1131 const TypeAryPtr* mtype = (ae == StrIntrinsicNode::UU) ? TypeAryPtr::CHARS : TypeAryPtr::BYTES;
1132 set_result(_gvn.transform(new AryEqNode(control(), memory(mtype), arg1, arg2, ae)));
1133 clear_upper_avx();
1134
1135 return true;
1136 }
1137
1138
1139 //------------------------------inline_countPositives------------------------------
1140 // int java.lang.StringCoding#countPositives0(byte[] ba, int off, int len)
1141 bool LibraryCallKit::inline_countPositives() {
1142 assert(callee()->signature()->size() == 3, "countPositives has 3 parameters");
1143 // no receiver since it is static method
1144 Node* ba = argument(0);
1145 Node* offset = argument(1);
1146 Node* len = argument(2);
1147
1148 ba = must_be_not_null(ba, true);
1149 RegionNode* bailout = create_bailout();
1150 generate_string_range_check(ba, offset, len, false, bailout);
1151 if (check_bailout(bailout)) {
1152 return true;
1153 }
1154
1155 Node* ba_start = array_element_address(ba, offset, T_BYTE);
1156 Node* result = new CountPositivesNode(control(), memory(TypeAryPtr::BYTES), ba_start, len);
1157 set_result(_gvn.transform(result));
1158 clear_upper_avx();
1159 return true;
1160 }
1161
1162 bool LibraryCallKit::inline_preconditions_checkIndex(BasicType bt) {
1163 Node* index = argument(0);
1164 Node* length = bt == T_INT ? argument(1) : argument(2);
1165 if (too_many_traps(Deoptimization::Reason_intrinsic) || too_many_traps(Deoptimization::Reason_range_check)) {
1166 return false;
1167 }
1168
1169 // check that length is positive
1170 Node* len_pos_cmp = _gvn.transform(CmpNode::make(length, integercon(0, bt), bt));
1171 Node* len_pos_bol = _gvn.transform(new BoolNode(len_pos_cmp, BoolTest::ge));
1172
1173 {
1174 BuildCutout unless(this, len_pos_bol, PROB_MAX);
1175 uncommon_trap(Deoptimization::Reason_intrinsic,
1176 Deoptimization::Action_make_not_entrant);
1177 }
1178
1179 if (stopped()) {
1180 // Length is known to be always negative during compilation and the IR graph so far constructed is good so return success
1181 return true;
1182 }
1183
1184 // length is now known positive, add a cast node to make this explicit
1185 jlong upper_bound = _gvn.type(length)->is_integer(bt)->hi_as_long();
1186 Node* casted_length = ConstraintCastNode::make_cast_for_basic_type(
1187 control(), length, TypeInteger::make(0, upper_bound, Type::WidenMax, bt),
1188 ConstraintCastNode::DependencyType::FloatingNarrowing, bt);
1189 casted_length = _gvn.transform(casted_length);
1190 replace_in_map(length, casted_length);
1191 length = casted_length;
1192
1193 // Use an unsigned comparison for the range check itself
1194 Node* rc_cmp = _gvn.transform(CmpNode::make(index, length, bt, true));
1195 BoolTest::mask btest = BoolTest::lt;
1196 Node* rc_bool = _gvn.transform(new BoolNode(rc_cmp, btest));
1197 RangeCheckNode* rc = new RangeCheckNode(control(), rc_bool, PROB_MAX, COUNT_UNKNOWN);
1198 _gvn.set_type(rc, rc->Value(&_gvn));
1199 if (!rc_bool->is_Con()) {
1200 record_for_igvn(rc);
1201 }
1202 set_control(_gvn.transform(new IfTrueNode(rc)));
1203 {
1204 PreserveJVMState pjvms(this);
1205 set_control(_gvn.transform(new IfFalseNode(rc)));
1206 uncommon_trap(Deoptimization::Reason_range_check,
1207 Deoptimization::Action_make_not_entrant);
1208 }
1209
1210 if (stopped()) {
1211 // Range check is known to always fail during compilation and the IR graph so far constructed is good so return success
1212 return true;
1213 }
1214
1215 // index is now known to be >= 0 and < length, cast it
1216 Node* result = ConstraintCastNode::make_cast_for_basic_type(
1217 control(), index, TypeInteger::make(0, upper_bound, Type::WidenMax, bt),
1218 ConstraintCastNode::DependencyType::FloatingNarrowing, bt);
1219 result = _gvn.transform(result);
1220 set_result(result);
1221 replace_in_map(index, result);
1222 return true;
1223 }
1224
1225 //------------------------------inline_string_indexOf------------------------
1226 bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) {
1227 if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
1228 return false;
1229 }
1230 Node* src = argument(0);
1231 Node* tgt = argument(1);
1232
1233 // Make the merge point
1234 RegionNode* result_rgn = new RegionNode(4);
1235 Node* result_phi = new PhiNode(result_rgn, TypeInt::INT);
1236
1237 src = must_be_not_null(src, true);
1238 tgt = must_be_not_null(tgt, true);
1239
1240 // Get start addr and length of source string
1241 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
1242 Node* src_count = load_array_length(src);
1243
1244 // Get start addr and length of substring
1245 Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
1246 Node* tgt_count = load_array_length(tgt);
1247
1248 Node* result = nullptr;
1249 bool call_opt_stub = (StubRoutines::_string_indexof_array[ae] != nullptr);
1250
1251 if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
1252 // Divide src size by 2 if String is UTF16 encoded
1253 src_count = _gvn.transform(new RShiftINode(src_count, intcon(1)));
1254 }
1255 if (ae == StrIntrinsicNode::UU) {
1256 // Divide substring size by 2 if String is UTF16 encoded
1257 tgt_count = _gvn.transform(new RShiftINode(tgt_count, intcon(1)));
1258 }
1259
1260 if (call_opt_stub) {
1261 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::string_IndexOf_Type(),
1262 StubRoutines::_string_indexof_array[ae],
1263 "stringIndexOf", TypePtr::BOTTOM, src_start,
1264 src_count, tgt_start, tgt_count);
1265 result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1266 } else {
1267 result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count,
1268 result_rgn, result_phi, ae);
1269 }
1270 if (result != nullptr) {
1271 result_phi->init_req(3, result);
1272 result_rgn->init_req(3, control());
1273 }
1274 set_control(_gvn.transform(result_rgn));
1275 record_for_igvn(result_rgn);
1276 set_result(_gvn.transform(result_phi));
1277
1278 return true;
1279 }
1280
1281 //-----------------------------inline_string_indexOfI-----------------------
1282 bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) {
1283 if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
1284 return false;
1285 }
1286
1287 assert(callee()->signature()->size() == 5, "String.indexOf() has 5 arguments");
1288 Node* src = argument(0); // byte[]
1289 Node* src_count = argument(1); // char count
1290 Node* tgt = argument(2); // byte[]
1291 Node* tgt_count = argument(3); // char count
1292 Node* from_index = argument(4); // char index
1293
1294 src = must_be_not_null(src, true);
1295 tgt = must_be_not_null(tgt, true);
1296
1297 // Multiply byte array index by 2 if String is UTF16 encoded
1298 Node* src_offset = (ae == StrIntrinsicNode::LL) ? from_index : _gvn.transform(new LShiftINode(from_index, intcon(1)));
1299 src_count = _gvn.transform(new SubINode(src_count, from_index));
1300 Node* src_start = array_element_address(src, src_offset, T_BYTE);
1301 Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
1302
1303 // Range checks
1304 RegionNode* bailout = create_bailout();
1305 generate_string_range_check(src, src_offset, src_count, ae != StrIntrinsicNode::LL, bailout);
1306 generate_string_range_check(tgt, intcon(0), tgt_count, ae == StrIntrinsicNode::UU, bailout);
1307 if (check_bailout(bailout)) {
1308 return true;
1309 }
1310
1311 RegionNode* region = new RegionNode(5);
1312 Node* phi = new PhiNode(region, TypeInt::INT);
1313 Node* result = nullptr;
1314
1315 bool call_opt_stub = (StubRoutines::_string_indexof_array[ae] != nullptr);
1316
1317 if (call_opt_stub) {
1318 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::string_IndexOf_Type(),
1319 StubRoutines::_string_indexof_array[ae],
1320 "stringIndexOf", TypePtr::BOTTOM, src_start,
1321 src_count, tgt_start, tgt_count);
1322 result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1323 } else {
1324 result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count,
1325 region, phi, ae);
1326 }
1327 if (result != nullptr) {
1328 // The result is index relative to from_index if substring was found, -1 otherwise.
1329 // Generate code which will fold into cmove.
1330 Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
1331 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
1332
1333 Node* if_lt = generate_slow_guard(bol, nullptr);
1334 if (if_lt != nullptr) {
1335 // result == -1
1336 phi->init_req(3, result);
1337 region->init_req(3, if_lt);
1338 }
1339 if (!stopped()) {
1340 result = _gvn.transform(new AddINode(result, from_index));
1341 phi->init_req(4, result);
1342 region->init_req(4, control());
1343 }
1344 }
1345
1346 set_control(_gvn.transform(region));
1347 record_for_igvn(region);
1348 set_result(_gvn.transform(phi));
1349 clear_upper_avx();
1350
1351 return true;
1352 }
1353
1354 // Create StrIndexOfNode with fast path checks
1355 Node* LibraryCallKit::make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count,
1356 RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae) {
1357 // Check for substr count > string count
1358 Node* cmp = _gvn.transform(new CmpINode(tgt_count, src_count));
1359 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::gt));
1360 Node* if_gt = generate_slow_guard(bol, nullptr);
1361 if (if_gt != nullptr) {
1362 phi->init_req(1, intcon(-1));
1363 region->init_req(1, if_gt);
1364 }
1365 if (!stopped()) {
1366 // Check for substr count == 0
1367 cmp = _gvn.transform(new CmpINode(tgt_count, intcon(0)));
1368 bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
1369 Node* if_zero = generate_slow_guard(bol, nullptr);
1370 if (if_zero != nullptr) {
1371 phi->init_req(2, intcon(0));
1372 region->init_req(2, if_zero);
1373 }
1374 }
1375 if (!stopped()) {
1376 return make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
1377 }
1378 return nullptr;
1379 }
1380
1381 //-----------------------------inline_string_indexOfChar-----------------------
1382 bool LibraryCallKit::inline_string_indexOfChar(StrIntrinsicNode::ArgEnc ae) {
1383 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1384 return false;
1385 }
1386 if (!Matcher::match_rule_supported(Op_StrIndexOfChar)) {
1387 return false;
1388 }
1389 assert(callee()->signature()->size() == 4, "String.indexOfChar() has 4 arguments");
1390 Node* src = argument(0); // byte[]
1391 Node* int_ch = argument(1);
1392 Node* from_index = argument(2);
1393 Node* max = argument(3);
1394
1395 src = must_be_not_null(src, true);
1396
1397 Node* src_offset = ae == StrIntrinsicNode::L ? from_index : _gvn.transform(new LShiftINode(from_index, intcon(1)));
1398 Node* src_start = array_element_address(src, src_offset, T_BYTE);
1399 Node* src_count = _gvn.transform(new SubINode(max, from_index));
1400
1401 // Range checks
1402 RegionNode* bailout = create_bailout();
1403 generate_string_range_check(src, src_offset, src_count, ae == StrIntrinsicNode::U, bailout);
1404 if (check_bailout(bailout)) {
1405 return true;
1406 }
1407
1408 // Check for int_ch >= 0
1409 Node* int_ch_cmp = _gvn.transform(new CmpINode(int_ch, intcon(0)));
1410 Node* int_ch_bol = _gvn.transform(new BoolNode(int_ch_cmp, BoolTest::ge));
1411 {
1412 BuildCutout unless(this, int_ch_bol, PROB_MAX);
1413 uncommon_trap(Deoptimization::Reason_intrinsic,
1414 Deoptimization::Action_maybe_recompile);
1415 }
1416 if (stopped()) {
1417 return true;
1418 }
1419
1420 RegionNode* region = new RegionNode(3);
1421 Node* phi = new PhiNode(region, TypeInt::INT);
1422
1423 Node* result = new StrIndexOfCharNode(control(), memory(TypeAryPtr::BYTES), src_start, src_count, int_ch, ae);
1424 C->set_has_split_ifs(true); // Has chance for split-if optimization
1425 _gvn.transform(result);
1426
1427 Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
1428 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
1429
1430 Node* if_lt = generate_slow_guard(bol, nullptr);
1431 if (if_lt != nullptr) {
1432 // result == -1
1433 phi->init_req(2, result);
1434 region->init_req(2, if_lt);
1435 }
1436 if (!stopped()) {
1437 result = _gvn.transform(new AddINode(result, from_index));
1438 phi->init_req(1, result);
1439 region->init_req(1, control());
1440 }
1441 set_control(_gvn.transform(region));
1442 record_for_igvn(region);
1443 set_result(_gvn.transform(phi));
1444 clear_upper_avx();
1445
1446 return true;
1447 }
1448 //---------------------------inline_string_copy---------------------
1449 // compressIt == true --> generate a compressed copy operation (compress char[]/byte[] to byte[])
1450 // int StringUTF16.compress0(char[] src, int srcOff, byte[] dst, int dstOff, int len)
1451 // int StringUTF16.compress0(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
1452 // compressIt == false --> generate an inflated copy operation (inflate byte[] to char[]/byte[])
1453 // void StringLatin1.inflate0(byte[] src, int srcOff, char[] dst, int dstOff, int len)
1454 // void StringLatin1.inflate0(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
1455 bool LibraryCallKit::inline_string_copy(bool compress) {
1456 int nargs = 5; // 2 oops, 3 ints
1457 assert(callee()->signature()->size() == nargs, "string copy has 5 arguments");
1458
1459 Node* src = argument(0);
1460 Node* src_offset = argument(1);
1461 Node* dst = argument(2);
1462 Node* dst_offset = argument(3);
1463 Node* length = argument(4);
1464
1465 // Check for allocation before we add nodes that would confuse
1466 // tightly_coupled_allocation()
1467 AllocateArrayNode* alloc = tightly_coupled_allocation(dst);
1468
1469 // Figure out the size and type of the elements we will be copying.
1470 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
1471 const TypeAryPtr* dst_type = dst->Value(&_gvn)->isa_aryptr();
1472 if (src_type == nullptr || dst_type == nullptr) {
1473 return false;
1474 }
1475 BasicType src_elem = src_type->elem()->array_element_basic_type();
1476 BasicType dst_elem = dst_type->elem()->array_element_basic_type();
1477 assert((compress && dst_elem == T_BYTE && (src_elem == T_BYTE || src_elem == T_CHAR)) ||
1478 (!compress && src_elem == T_BYTE && (dst_elem == T_BYTE || dst_elem == T_CHAR)),
1479 "Unsupported array types for inline_string_copy");
1480
1481 src = must_be_not_null(src, true);
1482 dst = must_be_not_null(dst, true);
1483
1484 // Convert char[] offsets to byte[] offsets
1485 bool convert_src = (compress && src_elem == T_BYTE);
1486 bool convert_dst = (!compress && dst_elem == T_BYTE);
1487 if (convert_src) {
1488 src_offset = _gvn.transform(new LShiftINode(src_offset, intcon(1)));
1489 } else if (convert_dst) {
1490 dst_offset = _gvn.transform(new LShiftINode(dst_offset, intcon(1)));
1491 }
1492
1493 // Range checks
1494 RegionNode* bailout = create_bailout();
1495 generate_string_range_check(src, src_offset, length, convert_src, bailout);
1496 generate_string_range_check(dst, dst_offset, length, convert_dst, bailout);
1497 if (check_bailout(bailout)) {
1498 return true;
1499 }
1500
1501 Node* src_start = array_element_address(src, src_offset, src_elem);
1502 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
1503 // 'src_start' points to src array + scaled offset
1504 // 'dst_start' points to dst array + scaled offset
1505 Node* count = nullptr;
1506 if (compress) {
1507 count = compress_string(src_start, TypeAryPtr::get_array_body_type(src_elem), dst_start, length);
1508 } else {
1509 inflate_string(src_start, dst_start, TypeAryPtr::get_array_body_type(dst_elem), length);
1510 }
1511
1512 if (alloc != nullptr) {
1513 if (alloc->maybe_set_complete(&_gvn)) {
1514 // "You break it, you buy it."
1515 InitializeNode* init = alloc->initialization();
1516 assert(init->is_complete(), "we just did this");
1517 init->set_complete_with_arraycopy();
1518 assert(dst->is_CheckCastPP(), "sanity");
1519 assert(dst->in(0)->in(0) == init, "dest pinned");
1520 }
1521 // Do not let stores that initialize this object be reordered with
1522 // a subsequent store that would make this object accessible by
1523 // other threads.
1524 // Record what AllocateNode this StoreStore protects so that
1525 // escape analysis can go from the MemBarStoreStoreNode to the
1526 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1527 // based on the escape status of the AllocateNode.
1528 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1529 }
1530 if (compress) {
1531 set_result(_gvn.transform(count));
1532 }
1533 clear_upper_avx();
1534
1535 return true;
1536 }
1537
1538 #ifdef _LP64
1539 #define XTOP ,top() /*additional argument*/
1540 #else //_LP64
1541 #define XTOP /*no additional argument*/
1542 #endif //_LP64
1543
1544 //------------------------inline_string_toBytesU--------------------------
1545 // public static byte[] StringUTF16.toBytes0(char[] value, int off, int len)
1546 bool LibraryCallKit::inline_string_toBytesU() {
1547 // Get the arguments.
1548 assert(callee()->signature()->size() == 3, "character array encoder requires 3 arguments");
1549 Node* value = argument(0);
1550 Node* offset = argument(1);
1551 Node* length = argument(2);
1552
1553 Node* newcopy = nullptr;
1554
1555 // Set the original stack and the reexecute bit for the interpreter to reexecute
1556 // the bytecode that invokes StringUTF16.toBytes0() if deoptimization happens.
1557 { PreserveReexecuteState preexecs(this);
1558 jvms()->set_should_reexecute(true);
1559
1560 value = must_be_not_null(value, true);
1561 RegionNode* bailout = create_bailout();
1562 generate_negative_guard(offset, bailout, nullptr, true);
1563 generate_negative_guard(length, bailout, nullptr, true);
1564 generate_limit_guard(offset, length, load_array_length(value), bailout, true);
1565 // Make sure that resulting byte[] length does not overflow Integer.MAX_VALUE
1566 generate_limit_guard(length, intcon(0), intcon(max_jint/2), bailout, true);
1567 if (check_bailout(bailout)) {
1568 return true;
1569 }
1570
1571 Node* size = _gvn.transform(new LShiftINode(length, intcon(1)));
1572 Node* klass_node = makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_BYTE)));
1573 newcopy = new_array(klass_node, size, 0); // no arguments to push
1574 AllocateArrayNode* alloc = tightly_coupled_allocation(newcopy);
1575 guarantee(alloc != nullptr, "created above");
1576
1577 // Calculate starting addresses.
1578 Node* src_start = array_element_address(value, offset, T_CHAR);
1579 Node* dst_start = basic_plus_adr(newcopy, arrayOopDesc::base_offset_in_bytes(T_BYTE));
1580
1581 // Check if dst array address is aligned to HeapWordSize
1582 bool aligned = (arrayOopDesc::base_offset_in_bytes(T_BYTE) % HeapWordSize == 0);
1583 // If true, then check if src array address is aligned to HeapWordSize
1584 if (aligned) {
1585 const TypeInt* toffset = gvn().type(offset)->is_int();
1586 aligned = toffset->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_CHAR) +
1587 toffset->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
1588 }
1589
1590 // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
1591 const char* copyfunc_name = "arraycopy";
1592 address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
1593 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
1594 OptoRuntime::fast_arraycopy_Type(),
1595 copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
1596 src_start, dst_start, ConvI2X(length) XTOP);
1597 // Do not let reads from the cloned object float above the arraycopy.
1598 if (alloc->maybe_set_complete(&_gvn)) {
1599 // "You break it, you buy it."
1600 InitializeNode* init = alloc->initialization();
1601 assert(init->is_complete(), "we just did this");
1602 init->set_complete_with_arraycopy();
1603 assert(newcopy->is_CheckCastPP(), "sanity");
1604 assert(newcopy->in(0)->in(0) == init, "dest pinned");
1605 }
1606 // Do not let stores that initialize this object be reordered with
1607 // a subsequent store that would make this object accessible by
1608 // other threads.
1609 // Record what AllocateNode this StoreStore protects so that
1610 // escape analysis can go from the MemBarStoreStoreNode to the
1611 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1612 // based on the escape status of the AllocateNode.
1613 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1614 } // original reexecute is set back here
1615
1616 C->set_has_split_ifs(true); // Has chance for split-if optimization
1617 if (!stopped()) {
1618 set_result(newcopy);
1619 }
1620 clear_upper_avx();
1621
1622 return true;
1623 }
1624
1625 //------------------------inline_string_getCharsU--------------------------
1626 // public void StringUTF16.getChars0(byte[] src, int srcBegin, int srcEnd, char dst[], int dstBegin)
1627 bool LibraryCallKit::inline_string_getCharsU() {
1628 assert(callee()->signature()->size() == 5, "StringUTF16.getChars0() has 5 arguments");
1629 // Get the arguments.
1630 Node* src = argument(0);
1631 Node* src_begin = argument(1);
1632 Node* src_end = argument(2); // exclusive offset (i < src_end)
1633 Node* dst = argument(3);
1634 Node* dst_begin = argument(4);
1635
1636 // Check for allocation before we add nodes that would confuse
1637 // tightly_coupled_allocation()
1638 AllocateArrayNode* alloc = tightly_coupled_allocation(dst);
1639
1640 // Check if a null path was taken unconditionally.
1641 src = must_be_not_null(src, true);
1642 dst = must_be_not_null(dst, true);
1643 if (stopped()) {
1644 return true;
1645 }
1646
1647 // Get length and convert char[] offset to byte[] offset
1648 Node* length = _gvn.transform(new SubINode(src_end, src_begin));
1649 src_begin = _gvn.transform(new LShiftINode(src_begin, intcon(1)));
1650
1651 // Range checks
1652 RegionNode* bailout = create_bailout();
1653 generate_string_range_check(src, src_begin, length, true, bailout);
1654 generate_string_range_check(dst, dst_begin, length, false, bailout);
1655 if (check_bailout(bailout)) {
1656 return true;
1657 }
1658
1659 // Calculate starting addresses.
1660 Node* src_start = array_element_address(src, src_begin, T_BYTE);
1661 Node* dst_start = array_element_address(dst, dst_begin, T_CHAR);
1662
1663 // Check if array addresses are aligned to HeapWordSize
1664 const TypeInt* tsrc = gvn().type(src_begin)->is_int();
1665 const TypeInt* tdst = gvn().type(dst_begin)->is_int();
1666 bool aligned = tsrc->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_BYTE) + tsrc->get_con() * type2aelembytes(T_BYTE)) % HeapWordSize == 0) &&
1667 tdst->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_CHAR) + tdst->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
1668
1669 // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
1670 const char* copyfunc_name = "arraycopy";
1671 address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
1672 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
1673 OptoRuntime::fast_arraycopy_Type(),
1674 copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
1675 src_start, dst_start, ConvI2X(length) XTOP);
1676 // Do not let reads from the cloned object float above the arraycopy.
1677 if (alloc != nullptr) {
1678 if (alloc->maybe_set_complete(&_gvn)) {
1679 // "You break it, you buy it."
1680 InitializeNode* init = alloc->initialization();
1681 assert(init->is_complete(), "we just did this");
1682 init->set_complete_with_arraycopy();
1683 assert(dst->is_CheckCastPP(), "sanity");
1684 assert(dst->in(0)->in(0) == init, "dest pinned");
1685 }
1686 // Do not let stores that initialize this object be reordered with
1687 // a subsequent store that would make this object accessible by
1688 // other threads.
1689 // Record what AllocateNode this StoreStore protects so that
1690 // escape analysis can go from the MemBarStoreStoreNode to the
1691 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1692 // based on the escape status of the AllocateNode.
1693 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1694 } else {
1695 insert_mem_bar(Op_MemBarCPUOrder);
1696 }
1697
1698 C->set_has_split_ifs(true); // Has chance for split-if optimization
1699 return true;
1700 }
1701
1702 //----------------------inline_string_char_access----------------------------
1703 // Store/Load char to/from byte[] array.
1704 // static void StringUTF16.putChar(byte[] val, int index, int c)
1705 // static char StringUTF16.getChar(byte[] val, int index)
1706 bool LibraryCallKit::inline_string_char_access(bool is_store) {
1707 Node* ch;
1708 if (is_store) {
1709 assert(callee()->signature()->size() == 3, "StringUTF16.putChar() has 3 arguments");
1710 ch = argument(2);
1711 } else {
1712 assert(callee()->signature()->size() == 2, "StringUTF16.getChar() has 2 arguments");
1713 ch = nullptr;
1714 }
1715 Node* value = argument(0);
1716 Node* index = argument(1);
1717
1718 // This intrinsic accesses byte[] array as char[] array. Computing the offsets
1719 // correctly requires matched array shapes.
1720 assert (arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE),
1721 "sanity: byte[] and char[] bases agree");
1722 assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2,
1723 "sanity: byte[] and char[] scales agree");
1724
1725 // Bail when getChar over constants is requested: constant folding would
1726 // reject folding mismatched char access over byte[]. A normal inlining for getChar
1727 // Java method would constant fold nicely instead.
1728 if (!is_store && value->is_Con() && index->is_Con()) {
1729 return false;
1730 }
1731
1732 // Save state and restore on bailout
1733 SavedState old_state(this);
1734
1735 value = must_be_not_null(value, true);
1736
1737 Node* adr = array_element_address(value, index, T_CHAR);
1738 if (adr->is_top()) {
1739 return false;
1740 }
1741 old_state.discard();
1742 if (is_store) {
1743 access_store_at(value, adr, TypeAryPtr::BYTES, ch, TypeInt::CHAR, T_CHAR, IN_HEAP | MO_UNORDERED | C2_MISMATCHED);
1744 } else {
1745 ch = access_load_at(value, adr, TypeAryPtr::BYTES, TypeInt::CHAR, T_CHAR, IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD | C2_UNKNOWN_CONTROL_LOAD);
1746 set_result(ch);
1747 }
1748 return true;
1749 }
1750
1751
1752 //------------------------------inline_math-----------------------------------
1753 // public static double Math.abs(double)
1754 // public static double Math.sqrt(double)
1755 // public static double Math.log(double)
1756 // public static double Math.log10(double)
1757 // public static double Math.round(double)
1758 bool LibraryCallKit::inline_double_math(vmIntrinsics::ID id) {
1759 Node* arg = argument(0);
1760 Node* n = nullptr;
1761 switch (id) {
1762 case vmIntrinsics::_dabs: n = new AbsDNode( arg); break;
1763 case vmIntrinsics::_dsqrt:
1764 case vmIntrinsics::_dsqrt_strict:
1765 n = new SqrtDNode(C, control(), arg); break;
1766 case vmIntrinsics::_ceil: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_ceil); break;
1767 case vmIntrinsics::_floor: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_floor); break;
1768 case vmIntrinsics::_rint: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_rint); break;
1769 case vmIntrinsics::_roundD: n = new RoundDNode(arg); break;
1770 case vmIntrinsics::_dcopySign: n = CopySignDNode::make(_gvn, arg, argument(2)); break;
1771 case vmIntrinsics::_dsignum: n = SignumDNode::make(_gvn, arg); break;
1772 default: fatal_unexpected_iid(id); break;
1773 }
1774 set_result(_gvn.transform(n));
1775 return true;
1776 }
1777
1778 //------------------------------inline_math-----------------------------------
1779 // public static float Math.abs(float)
1780 // public static int Math.abs(int)
1781 // public static long Math.abs(long)
1782 bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
1783 Node* arg = argument(0);
1784 Node* n = nullptr;
1785 switch (id) {
1786 case vmIntrinsics::_fabs: n = new AbsFNode( arg); break;
1787 case vmIntrinsics::_iabs: n = new AbsINode( arg); break;
1788 case vmIntrinsics::_labs: n = new AbsLNode( arg); break;
1789 case vmIntrinsics::_fcopySign: n = new CopySignFNode(arg, argument(1)); break;
1790 case vmIntrinsics::_fsignum: n = SignumFNode::make(_gvn, arg); break;
1791 case vmIntrinsics::_roundF: n = new RoundFNode(arg); break;
1792 default: fatal_unexpected_iid(id); break;
1793 }
1794 set_result(_gvn.transform(n));
1795 return true;
1796 }
1797
1798 //------------------------------runtime_math-----------------------------
1799 bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
1800 assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
1801 "must be (DD)D or (D)D type");
1802
1803 // Inputs
1804 Node* a = argument(0);
1805 Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? argument(2) : nullptr;
1806
1807 const TypePtr* no_memory_effects = nullptr;
1808 Node* trig = make_runtime_call(RC_LEAF | RC_PURE, call_type, funcAddr, funcName,
1809 no_memory_effects,
1810 a, top(), b, b ? top() : nullptr);
1811 Node* value = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+0));
1812 #ifdef ASSERT
1813 Node* value_top = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+1));
1814 assert(value_top == top(), "second value must be top");
1815 #endif
1816
1817 set_result(value);
1818 return true;
1819 }
1820
1821 //------------------------------inline_math_pow-----------------------------
1822 bool LibraryCallKit::inline_math_pow() {
1823 Node* base = argument(0);
1824 Node* exp = argument(2);
1825
1826 CallNode* pow = new PowDNode(C, base, exp);
1827 set_predefined_input_for_runtime_call(pow);
1828 pow = _gvn.transform(pow)->as_CallLeafPure();
1829 set_predefined_output_for_runtime_call(pow);
1830 Node* result = _gvn.transform(new ProjNode(pow, TypeFunc::Parms + 0));
1831 record_for_igvn(pow);
1832 set_result(result);
1833 return true;
1834 }
1835
1836 //------------------------------inline_math_native-----------------------------
1837 bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
1838 switch (id) {
1839 case vmIntrinsics::_dsin:
1840 return StubRoutines::dsin() != nullptr ?
1841 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dsin(), "dsin") :
1842 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dsin), "SIN");
1843 case vmIntrinsics::_dcos:
1844 return StubRoutines::dcos() != nullptr ?
1845 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dcos(), "dcos") :
1846 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dcos), "COS");
1847 case vmIntrinsics::_dtan:
1848 return StubRoutines::dtan() != nullptr ?
1849 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dtan(), "dtan") :
1850 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dtan), "TAN");
1851 case vmIntrinsics::_dsinh:
1852 return StubRoutines::dsinh() != nullptr ?
1853 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dsinh(), "dsinh") : false;
1854 case vmIntrinsics::_dtanh:
1855 return StubRoutines::dtanh() != nullptr ?
1856 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dtanh(), "dtanh") : false;
1857 case vmIntrinsics::_dcbrt:
1858 return StubRoutines::dcbrt() != nullptr ?
1859 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dcbrt(), "dcbrt") : false;
1860 case vmIntrinsics::_dexp:
1861 return StubRoutines::dexp() != nullptr ?
1862 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dexp(), "dexp") :
1863 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
1864 case vmIntrinsics::_dlog:
1865 return StubRoutines::dlog() != nullptr ?
1866 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog(), "dlog") :
1867 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog), "LOG");
1868 case vmIntrinsics::_dlog10:
1869 return StubRoutines::dlog10() != nullptr ?
1870 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog10(), "dlog10") :
1871 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), "LOG10");
1872
1873 case vmIntrinsics::_roundD: return Matcher::match_rule_supported(Op_RoundD) ? inline_double_math(id) : false;
1874 case vmIntrinsics::_ceil:
1875 case vmIntrinsics::_floor:
1876 case vmIntrinsics::_rint: return Matcher::match_rule_supported(Op_RoundDoubleMode) ? inline_double_math(id) : false;
1877
1878 case vmIntrinsics::_dsqrt:
1879 case vmIntrinsics::_dsqrt_strict:
1880 return Matcher::match_rule_supported(Op_SqrtD) ? inline_double_math(id) : false;
1881 case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_double_math(id) : false;
1882 case vmIntrinsics::_fabs: return Matcher::match_rule_supported(Op_AbsF) ? inline_math(id) : false;
1883 case vmIntrinsics::_iabs: return Matcher::match_rule_supported(Op_AbsI) ? inline_math(id) : false;
1884 case vmIntrinsics::_labs: return Matcher::match_rule_supported(Op_AbsL) ? inline_math(id) : false;
1885
1886 case vmIntrinsics::_dpow: return inline_math_pow();
1887 case vmIntrinsics::_dcopySign: return inline_double_math(id);
1888 case vmIntrinsics::_fcopySign: return inline_math(id);
1889 case vmIntrinsics::_dsignum: return Matcher::match_rule_supported(Op_SignumD) ? inline_double_math(id) : false;
1890 case vmIntrinsics::_fsignum: return Matcher::match_rule_supported(Op_SignumF) ? inline_math(id) : false;
1891 case vmIntrinsics::_roundF: return Matcher::match_rule_supported(Op_RoundF) ? inline_math(id) : false;
1892
1893 // These intrinsics are not yet correctly implemented
1894 case vmIntrinsics::_datan2:
1895 return false;
1896
1897 default:
1898 fatal_unexpected_iid(id);
1899 return false;
1900 }
1901 }
1902
1903 //----------------------------inline_notify-----------------------------------*
1904 bool LibraryCallKit::inline_notify(vmIntrinsics::ID id) {
1905 const TypeFunc* ftype = OptoRuntime::monitor_notify_Type();
1906 address func;
1907 if (id == vmIntrinsics::_notify) {
1908 func = OptoRuntime::monitor_notify_Java();
1909 } else {
1910 func = OptoRuntime::monitor_notifyAll_Java();
1911 }
1912 Node* call = make_runtime_call(RC_NO_LEAF, ftype, func, nullptr, TypeRawPtr::BOTTOM, argument(0));
1913 make_slow_call_ex(call, env()->Throwable_klass(), false);
1914 return true;
1915 }
1916
1917
1918 //----------------------------inline_min_max-----------------------------------
1919 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
1920 Node* a = nullptr;
1921 Node* b = nullptr;
1922 Node* n = nullptr;
1923 switch (id) {
1924 case vmIntrinsics::_min:
1925 case vmIntrinsics::_max:
1926 case vmIntrinsics::_minF:
1927 case vmIntrinsics::_maxF:
1928 case vmIntrinsics::_minF_strict:
1929 case vmIntrinsics::_maxF_strict:
1930 case vmIntrinsics::_min_strict:
1931 case vmIntrinsics::_max_strict:
1932 assert(callee()->signature()->size() == 2, "minF/maxF has 2 parameters of size 1 each.");
1933 a = argument(0);
1934 b = argument(1);
1935 break;
1936 case vmIntrinsics::_minD:
1937 case vmIntrinsics::_maxD:
1938 case vmIntrinsics::_minD_strict:
1939 case vmIntrinsics::_maxD_strict:
1940 assert(callee()->signature()->size() == 4, "minD/maxD has 2 parameters of size 2 each.");
1941 a = argument(0);
1942 b = argument(2);
1943 break;
1944 case vmIntrinsics::_minL:
1945 case vmIntrinsics::_maxL:
1946 assert(callee()->signature()->size() == 4, "minL/maxL has 2 parameters of size 2 each.");
1947 a = argument(0);
1948 b = argument(2);
1949 break;
1950 default:
1951 fatal_unexpected_iid(id);
1952 break;
1953 }
1954
1955 switch (id) {
1956 case vmIntrinsics::_min:
1957 case vmIntrinsics::_min_strict:
1958 n = new MinINode(a, b);
1959 break;
1960 case vmIntrinsics::_max:
1961 case vmIntrinsics::_max_strict:
1962 n = new MaxINode(a, b);
1963 break;
1964 case vmIntrinsics::_minF:
1965 case vmIntrinsics::_minF_strict:
1966 n = new MinFNode(a, b);
1967 break;
1968 case vmIntrinsics::_maxF:
1969 case vmIntrinsics::_maxF_strict:
1970 n = new MaxFNode(a, b);
1971 break;
1972 case vmIntrinsics::_minD:
1973 case vmIntrinsics::_minD_strict:
1974 n = new MinDNode(a, b);
1975 break;
1976 case vmIntrinsics::_maxD:
1977 case vmIntrinsics::_maxD_strict:
1978 n = new MaxDNode(a, b);
1979 break;
1980 case vmIntrinsics::_minL:
1981 n = new MinLNode(_gvn.C, a, b);
1982 break;
1983 case vmIntrinsics::_maxL:
1984 n = new MaxLNode(_gvn.C, a, b);
1985 break;
1986 default:
1987 fatal_unexpected_iid(id);
1988 break;
1989 }
1990
1991 set_result(_gvn.transform(n));
1992 return true;
1993 }
1994
1995 bool LibraryCallKit::inline_math_mathExact(Node* math, Node* test) {
1996 if (builtin_throw_too_many_traps(Deoptimization::Reason_intrinsic,
1997 env()->ArithmeticException_instance())) {
1998 // It has been already too many times, but we cannot use builtin_throw (e.g. we care about backtraces),
1999 // so let's bail out intrinsic rather than risking deopting again.
2000 return false;
2001 }
2002
2003 Node* bol = _gvn.transform( new BoolNode(test, BoolTest::overflow) );
2004 IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
2005 Node* fast_path = _gvn.transform( new IfFalseNode(check));
2006 Node* slow_path = _gvn.transform( new IfTrueNode(check) );
2007
2008 {
2009 PreserveJVMState pjvms(this);
2010 PreserveReexecuteState preexecs(this);
2011 jvms()->set_should_reexecute(true);
2012
2013 set_control(slow_path);
2014 set_i_o(i_o());
2015
2016 builtin_throw(Deoptimization::Reason_intrinsic,
2017 env()->ArithmeticException_instance(),
2018 /*allow_too_many_traps*/ false);
2019 }
2020
2021 set_control(fast_path);
2022 set_result(math);
2023 return true;
2024 }
2025
2026 template <typename OverflowOp>
2027 bool LibraryCallKit::inline_math_overflow(Node* arg1, Node* arg2) {
2028 typedef typename OverflowOp::MathOp MathOp;
2029
2030 MathOp* mathOp = new MathOp(arg1, arg2);
2031 Node* operation = _gvn.transform( mathOp );
2032 Node* ofcheck = _gvn.transform( new OverflowOp(arg1, arg2) );
2033 return inline_math_mathExact(operation, ofcheck);
2034 }
2035
2036 bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
2037 return inline_math_overflow<OverflowAddINode>(argument(0), is_increment ? intcon(1) : argument(1));
2038 }
2039
2040 bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
2041 return inline_math_overflow<OverflowAddLNode>(argument(0), is_increment ? longcon(1) : argument(2));
2042 }
2043
2044 bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
2045 return inline_math_overflow<OverflowSubINode>(argument(0), is_decrement ? intcon(1) : argument(1));
2046 }
2047
2048 bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
2049 return inline_math_overflow<OverflowSubLNode>(argument(0), is_decrement ? longcon(1) : argument(2));
2050 }
2051
2052 bool LibraryCallKit::inline_math_negateExactI() {
2053 return inline_math_overflow<OverflowSubINode>(intcon(0), argument(0));
2054 }
2055
2056 bool LibraryCallKit::inline_math_negateExactL() {
2057 return inline_math_overflow<OverflowSubLNode>(longcon(0), argument(0));
2058 }
2059
2060 bool LibraryCallKit::inline_math_multiplyExactI() {
2061 return inline_math_overflow<OverflowMulINode>(argument(0), argument(1));
2062 }
2063
2064 bool LibraryCallKit::inline_math_multiplyExactL() {
2065 return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
2066 }
2067
2068 bool LibraryCallKit::inline_math_multiplyHigh() {
2069 set_result(_gvn.transform(new MulHiLNode(argument(0), argument(2))));
2070 return true;
2071 }
2072
2073 bool LibraryCallKit::inline_math_unsignedMultiplyHigh() {
2074 set_result(_gvn.transform(new UMulHiLNode(argument(0), argument(2))));
2075 return true;
2076 }
2077
2078 inline int
2079 LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset, BasicType type) {
2080 const TypePtr* base_type = TypePtr::NULL_PTR;
2081 if (base != nullptr) base_type = _gvn.type(base)->isa_ptr();
2082 if (base_type == nullptr) {
2083 // Unknown type.
2084 return Type::AnyPtr;
2085 } else if (_gvn.type(base->uncast()) == TypePtr::NULL_PTR) {
2086 // Since this is a null+long form, we have to switch to a rawptr.
2087 base = _gvn.transform(new CastX2PNode(offset));
2088 offset = MakeConX(0);
2089 return Type::RawPtr;
2090 } else if (base_type->base() == Type::RawPtr) {
2091 return Type::RawPtr;
2092 } else if (base_type->isa_oopptr()) {
2093 // Base is never null => always a heap address.
2094 if (!TypePtr::NULL_PTR->higher_equal(base_type)) {
2095 return Type::OopPtr;
2096 }
2097 // Offset is small => always a heap address.
2098 const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2099 if (offset_type != nullptr &&
2100 base_type->offset() == 0 && // (should always be?)
2101 offset_type->_lo >= 0 &&
2102 !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2103 return Type::OopPtr;
2104 } else if (type == T_OBJECT) {
2105 // off heap access to an oop doesn't make any sense. Has to be on
2106 // heap.
2107 return Type::OopPtr;
2108 }
2109 // Otherwise, it might either be oop+off or null+addr.
2110 return Type::AnyPtr;
2111 } else {
2112 // No information:
2113 return Type::AnyPtr;
2114 }
2115 }
2116
2117 Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, BasicType type, bool can_cast) {
2118 Node* uncasted_base = base;
2119 int kind = classify_unsafe_addr(uncasted_base, offset, type);
2120 if (kind == Type::RawPtr) {
2121 return off_heap_plus_addr(uncasted_base, offset);
2122 } else if (kind == Type::AnyPtr) {
2123 assert(base == uncasted_base, "unexpected base change");
2124 if (can_cast) {
2125 if (!_gvn.type(base)->speculative_maybe_null() &&
2126 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
2127 // According to profiling, this access is always on
2128 // heap. Casting the base to not null and thus avoiding membars
2129 // around the access should allow better optimizations
2130 Node* null_ctl = top();
2131 base = null_check_oop(base, &null_ctl, true, true, true);
2132 assert(null_ctl->is_top(), "no null control here");
2133 return basic_plus_adr(base, offset);
2134 } else if (_gvn.type(base)->speculative_always_null() &&
2135 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
2136 // According to profiling, this access is always off
2137 // heap.
2138 base = null_assert(base);
2139 Node* raw_base = _gvn.transform(new CastX2PNode(offset));
2140 offset = MakeConX(0);
2141 return off_heap_plus_addr(raw_base, offset);
2142 }
2143 }
2144 // We don't know if it's an on heap or off heap access. Fall back
2145 // to raw memory access.
2146 Node* raw = _gvn.transform(new CheckCastPPNode(control(), base, TypeRawPtr::BOTTOM));
2147 return off_heap_plus_addr(raw, offset);
2148 } else {
2149 assert(base == uncasted_base, "unexpected base change");
2150 // We know it's an on heap access so base can't be null
2151 if (TypePtr::NULL_PTR->higher_equal(_gvn.type(base))) {
2152 base = must_be_not_null(base, true);
2153 }
2154 return basic_plus_adr(base, offset);
2155 }
2156 }
2157
2158 //--------------------------inline_number_methods-----------------------------
2159 // inline int Integer.numberOfLeadingZeros(int)
2160 // inline int Long.numberOfLeadingZeros(long)
2161 //
2162 // inline int Integer.numberOfTrailingZeros(int)
2163 // inline int Long.numberOfTrailingZeros(long)
2164 //
2165 // inline int Integer.bitCount(int)
2166 // inline int Long.bitCount(long)
2167 //
2168 // inline char Character.reverseBytes(char)
2169 // inline short Short.reverseBytes(short)
2170 // inline int Integer.reverseBytes(int)
2171 // inline long Long.reverseBytes(long)
2172 bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
2173 Node* arg = argument(0);
2174 Node* n = nullptr;
2175 switch (id) {
2176 case vmIntrinsics::_numberOfLeadingZeros_i: n = new CountLeadingZerosINode( arg); break;
2177 case vmIntrinsics::_numberOfLeadingZeros_l: n = new CountLeadingZerosLNode( arg); break;
2178 case vmIntrinsics::_numberOfTrailingZeros_i: n = new CountTrailingZerosINode(arg); break;
2179 case vmIntrinsics::_numberOfTrailingZeros_l: n = new CountTrailingZerosLNode(arg); break;
2180 case vmIntrinsics::_bitCount_i: n = new PopCountINode( arg); break;
2181 case vmIntrinsics::_bitCount_l: n = new PopCountLNode( arg); break;
2182 case vmIntrinsics::_reverseBytes_c: n = new ReverseBytesUSNode( arg); break;
2183 case vmIntrinsics::_reverseBytes_s: n = new ReverseBytesSNode( arg); break;
2184 case vmIntrinsics::_reverseBytes_i: n = new ReverseBytesINode( arg); break;
2185 case vmIntrinsics::_reverseBytes_l: n = new ReverseBytesLNode( arg); break;
2186 case vmIntrinsics::_reverse_i: n = new ReverseINode( arg); break;
2187 case vmIntrinsics::_reverse_l: n = new ReverseLNode( arg); break;
2188 default: fatal_unexpected_iid(id); break;
2189 }
2190 set_result(_gvn.transform(n));
2191 return true;
2192 }
2193
2194 //--------------------------inline_bitshuffle_methods-----------------------------
2195 // inline int Integer.compress(int, int)
2196 // inline int Integer.expand(int, int)
2197 // inline long Long.compress(long, long)
2198 // inline long Long.expand(long, long)
2199 bool LibraryCallKit::inline_bitshuffle_methods(vmIntrinsics::ID id) {
2200 Node* n = nullptr;
2201 switch (id) {
2202 case vmIntrinsics::_compress_i: n = new CompressBitsNode(argument(0), argument(1), TypeInt::INT); break;
2203 case vmIntrinsics::_expand_i: n = new ExpandBitsNode(argument(0), argument(1), TypeInt::INT); break;
2204 case vmIntrinsics::_compress_l: n = new CompressBitsNode(argument(0), argument(2), TypeLong::LONG); break;
2205 case vmIntrinsics::_expand_l: n = new ExpandBitsNode(argument(0), argument(2), TypeLong::LONG); break;
2206 default: fatal_unexpected_iid(id); break;
2207 }
2208 set_result(_gvn.transform(n));
2209 return true;
2210 }
2211
2212 //--------------------------inline_number_methods-----------------------------
2213 // inline int Integer.compareUnsigned(int, int)
2214 // inline int Long.compareUnsigned(long, long)
2215 bool LibraryCallKit::inline_compare_unsigned(vmIntrinsics::ID id) {
2216 Node* arg1 = argument(0);
2217 Node* arg2 = (id == vmIntrinsics::_compareUnsigned_l) ? argument(2) : argument(1);
2218 Node* n = nullptr;
2219 switch (id) {
2220 case vmIntrinsics::_compareUnsigned_i: n = new CmpU3Node(arg1, arg2); break;
2221 case vmIntrinsics::_compareUnsigned_l: n = new CmpUL3Node(arg1, arg2); break;
2222 default: fatal_unexpected_iid(id); break;
2223 }
2224 set_result(_gvn.transform(n));
2225 return true;
2226 }
2227
2228 //--------------------------inline_unsigned_divmod_methods-----------------------------
2229 // inline int Integer.divideUnsigned(int, int)
2230 // inline int Integer.remainderUnsigned(int, int)
2231 // inline long Long.divideUnsigned(long, long)
2232 // inline long Long.remainderUnsigned(long, long)
2233 bool LibraryCallKit::inline_divmod_methods(vmIntrinsics::ID id) {
2234 Node* n = nullptr;
2235 switch (id) {
2236 case vmIntrinsics::_divideUnsigned_i: {
2237 zero_check_int(argument(1));
2238 // Compile-time detect of null-exception
2239 if (stopped()) {
2240 return true; // keep the graph constructed so far
2241 }
2242 n = new UDivINode(control(), argument(0), argument(1));
2243 break;
2244 }
2245 case vmIntrinsics::_divideUnsigned_l: {
2246 zero_check_long(argument(2));
2247 // Compile-time detect of null-exception
2248 if (stopped()) {
2249 return true; // keep the graph constructed so far
2250 }
2251 n = new UDivLNode(control(), argument(0), argument(2));
2252 break;
2253 }
2254 case vmIntrinsics::_remainderUnsigned_i: {
2255 zero_check_int(argument(1));
2256 // Compile-time detect of null-exception
2257 if (stopped()) {
2258 return true; // keep the graph constructed so far
2259 }
2260 n = new UModINode(control(), argument(0), argument(1));
2261 break;
2262 }
2263 case vmIntrinsics::_remainderUnsigned_l: {
2264 zero_check_long(argument(2));
2265 // Compile-time detect of null-exception
2266 if (stopped()) {
2267 return true; // keep the graph constructed so far
2268 }
2269 n = new UModLNode(control(), argument(0), argument(2));
2270 break;
2271 }
2272 default: fatal_unexpected_iid(id); break;
2273 }
2274 set_result(_gvn.transform(n));
2275 return true;
2276 }
2277
2278 //----------------------------inline_unsafe_access----------------------------
2279
2280 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2281 // Attempt to infer a sharper value type from the offset and base type.
2282 ciKlass* sharpened_klass = nullptr;
2283
2284 // See if it is an instance field, with an object type.
2285 if (alias_type->field() != nullptr) {
2286 if (alias_type->field()->type()->is_klass()) {
2287 sharpened_klass = alias_type->field()->type()->as_klass();
2288 }
2289 }
2290
2291 const TypeOopPtr* result = nullptr;
2292 // See if it is a narrow oop array.
2293 if (adr_type->isa_aryptr()) {
2294 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2295 const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2296 if (elem_type != nullptr && elem_type->is_loaded()) {
2297 // Sharpen the value type.
2298 result = elem_type;
2299 }
2300 }
2301 }
2302
2303 // The sharpened class might be unloaded if there is no class loader
2304 // contraint in place.
2305 if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2306 // Sharpen the value type.
2307 result = TypeOopPtr::make_from_klass(sharpened_klass);
2308 }
2309 if (result != nullptr) {
2310 #ifndef PRODUCT
2311 if (C->print_intrinsics() || C->print_inlining()) {
2312 tty->print(" from base type: "); adr_type->dump(); tty->cr();
2313 tty->print(" sharpened value: "); result->dump(); tty->cr();
2314 }
2315 #endif
2316 }
2317 return result;
2318 }
2319
2320 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2321 switch (kind) {
2322 case Relaxed:
2323 return MO_UNORDERED;
2324 case Opaque:
2325 return MO_RELAXED;
2326 case Acquire:
2327 return MO_ACQUIRE;
2328 case Release:
2329 return MO_RELEASE;
2330 case Volatile:
2331 return MO_SEQ_CST;
2332 default:
2333 ShouldNotReachHere();
2334 return 0;
2335 }
2336 }
2337
2338 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2339 if (callee()->is_static()) return false; // caller must have the capability!
2340 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2341 guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2342 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2343 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2344
2345 if (is_reference_type(type)) {
2346 decorators |= ON_UNKNOWN_OOP_REF;
2347 }
2348
2349 if (unaligned) {
2350 decorators |= C2_UNALIGNED;
2351 }
2352
2353 #ifndef PRODUCT
2354 {
2355 ResourceMark rm;
2356 // Check the signatures.
2357 ciSignature* sig = callee()->signature();
2358 #ifdef ASSERT
2359 if (!is_store) {
2360 // Object getReference(Object base, int/long offset), etc.
2361 BasicType rtype = sig->return_type()->basic_type();
2362 assert(rtype == type, "getter must return the expected value");
2363 assert(sig->count() == 2, "oop getter has 2 arguments");
2364 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2365 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2366 } else {
2367 // void putReference(Object base, int/long offset, Object x), etc.
2368 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2369 assert(sig->count() == 3, "oop putter has 3 arguments");
2370 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2371 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2372 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2373 assert(vtype == type, "putter must accept the expected value");
2374 }
2375 #endif // ASSERT
2376 }
2377 #endif //PRODUCT
2378
2379 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2380
2381 Node* receiver = argument(0); // type: oop
2382
2383 // Build address expression.
2384 Node* heap_base_oop = top();
2385
2386 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2387 Node* base = argument(1); // type: oop
2388 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2389 Node* offset = argument(2); // type: long
2390 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2391 // to be plain byte offsets, which are also the same as those accepted
2392 // by oopDesc::field_addr.
2393 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2394 "fieldOffset must be byte-scaled");
2395 // 32-bit machines ignore the high half!
2396 offset = ConvL2X(offset);
2397
2398 // Save state and restore on bailout
2399 SavedState old_state(this);
2400
2401 Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2402 assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2403
2404 if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2405 if (type != T_OBJECT) {
2406 decorators |= IN_NATIVE; // off-heap primitive access
2407 } else {
2408 return false; // off-heap oop accesses are not supported
2409 }
2410 } else {
2411 heap_base_oop = base; // on-heap or mixed access
2412 }
2413
2414 // Can base be null? Otherwise, always on-heap access.
2415 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2416
2417 if (!can_access_non_heap) {
2418 decorators |= IN_HEAP;
2419 }
2420
2421 Node* val = is_store ? argument(4) : nullptr;
2422
2423 const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2424 if (adr_type == TypePtr::NULL_PTR) {
2425 return false; // off-heap access with zero address
2426 }
2427
2428 // Try to categorize the address.
2429 Compile::AliasType* alias_type = C->alias_type(adr_type);
2430 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2431
2432 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2433 alias_type->adr_type() == TypeAryPtr::RANGE) {
2434 return false; // not supported
2435 }
2436
2437 bool mismatched = false;
2438 BasicType bt = alias_type->basic_type();
2439 if (bt != T_ILLEGAL) {
2440 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2441 if (bt == T_BYTE && adr_type->isa_aryptr()) {
2442 // Alias type doesn't differentiate between byte[] and boolean[]).
2443 // Use address type to get the element type.
2444 bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2445 }
2446 if (is_reference_type(bt, true)) {
2447 // accessing an array field with getReference is not a mismatch
2448 bt = T_OBJECT;
2449 }
2450 if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2451 // Don't intrinsify mismatched object accesses
2452 return false;
2453 }
2454 mismatched = (bt != type);
2455 } else if (alias_type->adr_type()->isa_oopptr()) {
2456 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2457 }
2458
2459 old_state.discard();
2460 assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2461
2462 if (mismatched) {
2463 decorators |= C2_MISMATCHED;
2464 }
2465
2466 // First guess at the value type.
2467 const Type *value_type = Type::get_const_basic_type(type);
2468
2469 // Figure out the memory ordering.
2470 decorators |= mo_decorator_for_access_kind(kind);
2471
2472 if (!is_store && type == T_OBJECT) {
2473 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2474 if (tjp != nullptr) {
2475 value_type = tjp;
2476 }
2477 }
2478
2479 receiver = null_check(receiver);
2480 if (stopped()) {
2481 return true;
2482 }
2483 // Heap pointers get a null-check from the interpreter,
2484 // as a courtesy. However, this is not guaranteed by Unsafe,
2485 // and it is not possible to fully distinguish unintended nulls
2486 // from intended ones in this API.
2487
2488 if (!is_store) {
2489 Node* p = nullptr;
2490 // Try to constant fold a load from a constant field
2491 ciField* field = alias_type->field();
2492 if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) {
2493 // final or stable field
2494 p = make_constant_from_field(field, heap_base_oop);
2495 }
2496
2497 if (p == nullptr) { // Could not constant fold the load
2498 p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2499 // Normalize the value returned by getBoolean in the following cases
2500 if (type == T_BOOLEAN &&
2501 (mismatched ||
2502 heap_base_oop == top() || // - heap_base_oop is null or
2503 (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2504 // and the unsafe access is made to large offset
2505 // (i.e., larger than the maximum offset necessary for any
2506 // field access)
2507 ) {
2508 IdealKit ideal = IdealKit(this);
2509 #define __ ideal.
2510 IdealVariable normalized_result(ideal);
2511 __ declarations_done();
2512 __ set(normalized_result, p);
2513 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2514 __ set(normalized_result, ideal.ConI(1));
2515 ideal.end_if();
2516 final_sync(ideal);
2517 p = __ value(normalized_result);
2518 #undef __
2519 }
2520 }
2521 if (type == T_ADDRESS) {
2522 p = gvn().transform(new CastP2XNode(nullptr, p));
2523 p = ConvX2UL(p);
2524 }
2525 // The load node has the control of the preceding MemBarCPUOrder. All
2526 // following nodes will have the control of the MemBarCPUOrder inserted at
2527 // the end of this method. So, pushing the load onto the stack at a later
2528 // point is fine.
2529 set_result(p);
2530 } else {
2531 if (bt == T_ADDRESS) {
2532 // Repackage the long as a pointer.
2533 val = ConvL2X(val);
2534 val = gvn().transform(new CastX2PNode(val));
2535 }
2536 access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2537 }
2538
2539 return true;
2540 }
2541
2542 //----------------------------inline_unsafe_load_store----------------------------
2543 // This method serves a couple of different customers (depending on LoadStoreKind):
2544 //
2545 // LS_cmp_swap:
2546 //
2547 // boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2548 // boolean compareAndSetInt( Object o, long offset, int expected, int x);
2549 // boolean compareAndSetLong( Object o, long offset, long expected, long x);
2550 //
2551 // LS_cmp_swap_weak:
2552 //
2553 // boolean weakCompareAndSetReference( Object o, long offset, Object expected, Object x);
2554 // boolean weakCompareAndSetReferencePlain( Object o, long offset, Object expected, Object x);
2555 // boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2556 // boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2557 //
2558 // boolean weakCompareAndSetInt( Object o, long offset, int expected, int x);
2559 // boolean weakCompareAndSetIntPlain( Object o, long offset, int expected, int x);
2560 // boolean weakCompareAndSetIntAcquire( Object o, long offset, int expected, int x);
2561 // boolean weakCompareAndSetIntRelease( Object o, long offset, int expected, int x);
2562 //
2563 // boolean weakCompareAndSetLong( Object o, long offset, long expected, long x);
2564 // boolean weakCompareAndSetLongPlain( Object o, long offset, long expected, long x);
2565 // boolean weakCompareAndSetLongAcquire( Object o, long offset, long expected, long x);
2566 // boolean weakCompareAndSetLongRelease( Object o, long offset, long expected, long x);
2567 //
2568 // LS_cmp_exchange:
2569 //
2570 // Object compareAndExchangeReferenceVolatile(Object o, long offset, Object expected, Object x);
2571 // Object compareAndExchangeReferenceAcquire( Object o, long offset, Object expected, Object x);
2572 // Object compareAndExchangeReferenceRelease( Object o, long offset, Object expected, Object x);
2573 //
2574 // Object compareAndExchangeIntVolatile( Object o, long offset, Object expected, Object x);
2575 // Object compareAndExchangeIntAcquire( Object o, long offset, Object expected, Object x);
2576 // Object compareAndExchangeIntRelease( Object o, long offset, Object expected, Object x);
2577 //
2578 // Object compareAndExchangeLongVolatile( Object o, long offset, Object expected, Object x);
2579 // Object compareAndExchangeLongAcquire( Object o, long offset, Object expected, Object x);
2580 // Object compareAndExchangeLongRelease( Object o, long offset, Object expected, Object x);
2581 //
2582 // LS_get_add:
2583 //
2584 // int getAndAddInt( Object o, long offset, int delta)
2585 // long getAndAddLong(Object o, long offset, long delta)
2586 //
2587 // LS_get_set:
2588 //
2589 // int getAndSet(Object o, long offset, int newValue)
2590 // long getAndSet(Object o, long offset, long newValue)
2591 // Object getAndSet(Object o, long offset, Object newValue)
2592 //
2593 bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadStoreKind kind, const AccessKind access_kind) {
2594 // This basic scheme here is the same as inline_unsafe_access, but
2595 // differs in enough details that combining them would make the code
2596 // overly confusing. (This is a true fact! I originally combined
2597 // them, but even I was confused by it!) As much code/comments as
2598 // possible are retained from inline_unsafe_access though to make
2599 // the correspondences clearer. - dl
2600
2601 if (callee()->is_static()) return false; // caller must have the capability!
2602
2603 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2604 decorators |= mo_decorator_for_access_kind(access_kind);
2605
2606 #ifndef PRODUCT
2607 BasicType rtype;
2608 {
2609 ResourceMark rm;
2610 // Check the signatures.
2611 ciSignature* sig = callee()->signature();
2612 rtype = sig->return_type()->basic_type();
2613 switch(kind) {
2614 case LS_get_add:
2615 case LS_get_set: {
2616 // Check the signatures.
2617 #ifdef ASSERT
2618 assert(rtype == type, "get and set must return the expected type");
2619 assert(sig->count() == 3, "get and set has 3 arguments");
2620 assert(sig->type_at(0)->basic_type() == T_OBJECT, "get and set base is object");
2621 assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
2622 assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
2623 assert(access_kind == Volatile, "mo is not passed to intrinsic nodes in current implementation");
2624 #endif // ASSERT
2625 break;
2626 }
2627 case LS_cmp_swap:
2628 case LS_cmp_swap_weak: {
2629 // Check the signatures.
2630 #ifdef ASSERT
2631 assert(rtype == T_BOOLEAN, "CAS must return boolean");
2632 assert(sig->count() == 4, "CAS has 4 arguments");
2633 assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2634 assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2635 #endif // ASSERT
2636 break;
2637 }
2638 case LS_cmp_exchange: {
2639 // Check the signatures.
2640 #ifdef ASSERT
2641 assert(rtype == type, "CAS must return the expected type");
2642 assert(sig->count() == 4, "CAS has 4 arguments");
2643 assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2644 assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2645 #endif // ASSERT
2646 break;
2647 }
2648 default:
2649 ShouldNotReachHere();
2650 }
2651 }
2652 #endif //PRODUCT
2653
2654 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2655
2656 // Get arguments:
2657 Node* receiver = nullptr;
2658 Node* base = nullptr;
2659 Node* offset = nullptr;
2660 Node* oldval = nullptr;
2661 Node* newval = nullptr;
2662 switch(kind) {
2663 case LS_cmp_swap:
2664 case LS_cmp_swap_weak:
2665 case LS_cmp_exchange: {
2666 const bool two_slot_type = type2size[type] == 2;
2667 receiver = argument(0); // type: oop
2668 base = argument(1); // type: oop
2669 offset = argument(2); // type: long
2670 oldval = argument(4); // type: oop, int, or long
2671 newval = argument(two_slot_type ? 6 : 5); // type: oop, int, or long
2672 break;
2673 }
2674 case LS_get_add:
2675 case LS_get_set: {
2676 receiver = argument(0); // type: oop
2677 base = argument(1); // type: oop
2678 offset = argument(2); // type: long
2679 oldval = nullptr;
2680 newval = argument(4); // type: oop, int, or long
2681 break;
2682 }
2683 default:
2684 ShouldNotReachHere();
2685 }
2686
2687 // Build field offset expression.
2688 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2689 // to be plain byte offsets, which are also the same as those accepted
2690 // by oopDesc::field_addr.
2691 assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2692 // 32-bit machines ignore the high half of long offsets
2693 offset = ConvL2X(offset);
2694 // Save state and restore on bailout
2695 SavedState old_state(this);
2696 Node* adr = make_unsafe_address(base, offset,type, false);
2697 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2698
2699 Compile::AliasType* alias_type = C->alias_type(adr_type);
2700 BasicType bt = alias_type->basic_type();
2701 if (bt != T_ILLEGAL &&
2702 (is_reference_type(bt) != (type == T_OBJECT))) {
2703 // Don't intrinsify mismatched object accesses.
2704 return false;
2705 }
2706
2707 old_state.discard();
2708
2709 // For CAS, unlike inline_unsafe_access, there seems no point in
2710 // trying to refine types. Just use the coarse types here.
2711 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2712 const Type *value_type = Type::get_const_basic_type(type);
2713
2714 switch (kind) {
2715 case LS_get_set:
2716 case LS_cmp_exchange: {
2717 if (type == T_OBJECT) {
2718 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2719 if (tjp != nullptr) {
2720 value_type = tjp;
2721 }
2722 }
2723 break;
2724 }
2725 case LS_cmp_swap:
2726 case LS_cmp_swap_weak:
2727 case LS_get_add:
2728 break;
2729 default:
2730 ShouldNotReachHere();
2731 }
2732
2733 // Null check receiver.
2734 receiver = null_check(receiver);
2735 if (stopped()) {
2736 return true;
2737 }
2738
2739 int alias_idx = C->get_alias_index(adr_type);
2740
2741 if (is_reference_type(type)) {
2742 decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2743
2744 // Transformation of a value which could be null pointer (CastPP #null)
2745 // could be delayed during Parse (for example, in adjust_map_after_if()).
2746 // Execute transformation here to avoid barrier generation in such case.
2747 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2748 newval = _gvn.makecon(TypePtr::NULL_PTR);
2749
2750 if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2751 // Refine the value to a null constant, when it is known to be null
2752 oldval = _gvn.makecon(TypePtr::NULL_PTR);
2753 }
2754 }
2755
2756 Node* result = nullptr;
2757 switch (kind) {
2758 case LS_cmp_exchange: {
2759 result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2760 oldval, newval, value_type, type, decorators);
2761 break;
2762 }
2763 case LS_cmp_swap_weak:
2764 decorators |= C2_WEAK_CMPXCHG;
2765 case LS_cmp_swap: {
2766 result = access_atomic_cmpxchg_bool_at(base, adr, adr_type, alias_idx,
2767 oldval, newval, value_type, type, decorators);
2768 break;
2769 }
2770 case LS_get_set: {
2771 result = access_atomic_xchg_at(base, adr, adr_type, alias_idx,
2772 newval, value_type, type, decorators);
2773 break;
2774 }
2775 case LS_get_add: {
2776 result = access_atomic_add_at(base, adr, adr_type, alias_idx,
2777 newval, value_type, type, decorators);
2778 break;
2779 }
2780 default:
2781 ShouldNotReachHere();
2782 }
2783
2784 assert(type2size[result->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2785 set_result(result);
2786 return true;
2787 }
2788
2789 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
2790 // Regardless of form, don't allow previous ld/st to move down,
2791 // then issue acquire, release, or volatile mem_bar.
2792 insert_mem_bar(Op_MemBarCPUOrder);
2793 switch(id) {
2794 case vmIntrinsics::_loadFence:
2795 insert_mem_bar(Op_LoadFence);
2796 return true;
2797 case vmIntrinsics::_storeFence:
2798 insert_mem_bar(Op_StoreFence);
2799 return true;
2800 case vmIntrinsics::_storeStoreFence:
2801 insert_mem_bar(Op_StoreStoreFence);
2802 return true;
2803 case vmIntrinsics::_fullFence:
2804 insert_mem_bar(Op_MemBarFull);
2805 return true;
2806 default:
2807 fatal_unexpected_iid(id);
2808 return false;
2809 }
2810 }
2811
2812 bool LibraryCallKit::inline_onspinwait() {
2813 insert_mem_bar(Op_OnSpinWait);
2814 return true;
2815 }
2816
2817 bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
2818 if (!kls->is_Con()) {
2819 return true;
2820 }
2821 const TypeInstKlassPtr* klsptr = kls->bottom_type()->isa_instklassptr();
2822 if (klsptr == nullptr) {
2823 return true;
2824 }
2825 ciInstanceKlass* ik = klsptr->instance_klass();
2826 // don't need a guard for a klass that is already initialized
2827 return !ik->is_initialized();
2828 }
2829
2830 //----------------------------inline_unsafe_writeback0-------------------------
2831 // public native void Unsafe.writeback0(long address)
2832 bool LibraryCallKit::inline_unsafe_writeback0() {
2833 if (!Matcher::has_match_rule(Op_CacheWB)) {
2834 return false;
2835 }
2836 #ifndef PRODUCT
2837 assert(Matcher::has_match_rule(Op_CacheWBPreSync), "found match rule for CacheWB but not CacheWBPreSync");
2838 assert(Matcher::has_match_rule(Op_CacheWBPostSync), "found match rule for CacheWB but not CacheWBPostSync");
2839 ciSignature* sig = callee()->signature();
2840 assert(sig->type_at(0)->basic_type() == T_LONG, "Unsafe_writeback0 address is long!");
2841 #endif
2842 null_check_receiver(); // null-check, then ignore
2843 Node *addr = argument(1);
2844 addr = new CastX2PNode(addr);
2845 addr = _gvn.transform(addr);
2846 Node *flush = new CacheWBNode(control(), memory(TypeRawPtr::BOTTOM), addr);
2847 flush = _gvn.transform(flush);
2848 set_memory(flush, TypeRawPtr::BOTTOM);
2849 return true;
2850 }
2851
2852 //----------------------------inline_unsafe_writeback0-------------------------
2853 // public native void Unsafe.writeback0(long address)
2854 bool LibraryCallKit::inline_unsafe_writebackSync0(bool is_pre) {
2855 if (is_pre && !Matcher::has_match_rule(Op_CacheWBPreSync)) {
2856 return false;
2857 }
2858 if (!is_pre && !Matcher::has_match_rule(Op_CacheWBPostSync)) {
2859 return false;
2860 }
2861 #ifndef PRODUCT
2862 assert(Matcher::has_match_rule(Op_CacheWB),
2863 (is_pre ? "found match rule for CacheWBPreSync but not CacheWB"
2864 : "found match rule for CacheWBPostSync but not CacheWB"));
2865
2866 #endif
2867 null_check_receiver(); // null-check, then ignore
2868 Node *sync;
2869 if (is_pre) {
2870 sync = new CacheWBPreSyncNode(control(), memory(TypeRawPtr::BOTTOM));
2871 } else {
2872 sync = new CacheWBPostSyncNode(control(), memory(TypeRawPtr::BOTTOM));
2873 }
2874 sync = _gvn.transform(sync);
2875 set_memory(sync, TypeRawPtr::BOTTOM);
2876 return true;
2877 }
2878
2879 //----------------------------inline_unsafe_allocate---------------------------
2880 // public native Object Unsafe.allocateInstance(Class<?> cls);
2881 bool LibraryCallKit::inline_unsafe_allocate() {
2882
2883 #if INCLUDE_JVMTI
2884 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
2885 return false;
2886 }
2887 #endif //INCLUDE_JVMTI
2888
2889 if (callee()->is_static()) return false; // caller must have the capability!
2890
2891 null_check_receiver(); // null-check, then ignore
2892 Node* cls = null_check(argument(1));
2893 if (stopped()) return true;
2894
2895 Node* kls = load_klass_from_mirror(cls, false, nullptr, 0);
2896 kls = null_check(kls);
2897 if (stopped()) return true; // argument was like int.class
2898
2899 #if INCLUDE_JVMTI
2900 // Don't try to access new allocated obj in the intrinsic.
2901 // It causes perfomance issues even when jvmti event VmObjectAlloc is disabled.
2902 // Deoptimize and allocate in interpreter instead.
2903 Node* addr = makecon(TypeRawPtr::make((address) &JvmtiExport::_should_notify_object_alloc));
2904 Node* should_post_vm_object_alloc = make_load(this->control(), addr, TypeInt::INT, T_INT, MemNode::unordered);
2905 Node* chk = _gvn.transform(new CmpINode(should_post_vm_object_alloc, intcon(0)));
2906 Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq));
2907 {
2908 BuildCutout unless(this, tst, PROB_MAX);
2909 uncommon_trap(Deoptimization::Reason_intrinsic,
2910 Deoptimization::Action_make_not_entrant);
2911 }
2912 if (stopped()) {
2913 return true;
2914 }
2915 #endif //INCLUDE_JVMTI
2916
2917 Node* test = nullptr;
2918 if (LibraryCallKit::klass_needs_init_guard(kls)) {
2919 // Note: The argument might still be an illegal value like
2920 // Serializable.class or Object[].class. The runtime will handle it.
2921 // But we must make an explicit check for initialization.
2922 Node* insp = off_heap_plus_addr(kls, in_bytes(InstanceKlass::init_state_offset()));
2923 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2924 // can generate code to load it as unsigned byte.
2925 Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
2926 Node* bits = intcon(InstanceKlass::fully_initialized);
2927 test = _gvn.transform(new SubINode(inst, bits));
2928 // The 'test' is non-zero if we need to take a slow path.
2929 }
2930
2931 Node* obj = new_instance(kls, test);
2932 set_result(obj);
2933 return true;
2934 }
2935
2936 //------------------------inline_native_time_funcs--------------
2937 // inline code for System.currentTimeMillis() and System.nanoTime()
2938 // these have the same type and signature
2939 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2940 const TypeFunc* tf = OptoRuntime::void_long_Type();
2941 const TypePtr* no_memory_effects = nullptr;
2942 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2943 Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2944 #ifdef ASSERT
2945 Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2946 assert(value_top == top(), "second value must be top");
2947 #endif
2948 set_result(value);
2949 return true;
2950 }
2951
2952 //--------------------inline_native_vthread_start_transition--------------------
2953 // inline void startTransition(boolean is_mount);
2954 // inline void startFinalTransition();
2955 // Pseudocode of implementation:
2956 //
2957 // java_lang_Thread::set_is_in_vthread_transition(vthread, true);
2958 // carrier->set_is_in_vthread_transition(true);
2959 // OrderAccess::storeload();
2960 // int disable_requests = java_lang_Thread::vthread_transition_disable_count(vthread)
2961 // + global_vthread_transition_disable_count();
2962 // if (disable_requests > 0) {
2963 // slow path: runtime call
2964 // }
2965 bool LibraryCallKit::inline_native_vthread_start_transition(address funcAddr, const char* funcName, bool is_final_transition) {
2966 Node* vt_oop = must_be_not_null(argument(0), true); // VirtualThread this argument
2967 IdealKit ideal(this);
2968
2969 Node* thread = ideal.thread();
2970 Node* jt_addr = off_heap_plus_addr(thread, in_bytes(JavaThread::is_in_vthread_transition_offset()));
2971 Node* vt_addr = basic_plus_adr(vt_oop, java_lang_Thread::is_in_vthread_transition_offset());
2972 access_store_at(nullptr, jt_addr, _gvn.type(jt_addr)->is_ptr(), ideal.ConI(1), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
2973 access_store_at(nullptr, vt_addr, _gvn.type(vt_addr)->is_ptr(), ideal.ConI(1), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
2974 insert_mem_bar(Op_MemBarStoreLoad);
2975 ideal.sync_kit(this);
2976
2977 Node* global_disable_addr = makecon(TypeRawPtr::make((address)MountUnmountDisabler::global_vthread_transition_disable_count_address()));
2978 Node* global_disable = ideal.load(ideal.ctrl(), global_disable_addr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, true /*require_atomic_access*/);
2979 Node* vt_disable_addr = basic_plus_adr(vt_oop, java_lang_Thread::vthread_transition_disable_count_offset());
2980 const TypePtr* vt_disable_addr_t = _gvn.type(vt_disable_addr)->is_ptr();
2981 Node* vt_disable = ideal.load(ideal.ctrl(), vt_disable_addr, TypeInt::INT, T_INT, C->get_alias_index(vt_disable_addr_t), true /*require_atomic_access*/);
2982 Node* disabled = _gvn.transform(new AddINode(global_disable, vt_disable));
2983
2984 ideal.if_then(disabled, BoolTest::ne, ideal.ConI(0)); {
2985 sync_kit(ideal);
2986 Node* is_mount = is_final_transition ? ideal.ConI(0) : argument(1);
2987 const TypeFunc* tf = OptoRuntime::vthread_transition_Type();
2988 make_runtime_call(RC_NO_LEAF, tf, funcAddr, funcName, TypePtr::BOTTOM, vt_oop, is_mount);
2989 ideal.sync_kit(this);
2990 }
2991 ideal.end_if();
2992
2993 final_sync(ideal);
2994 return true;
2995 }
2996
2997 bool LibraryCallKit::inline_native_vthread_end_transition(address funcAddr, const char* funcName, bool is_first_transition) {
2998 Node* vt_oop = must_be_not_null(argument(0), true); // VirtualThread this argument
2999 IdealKit ideal(this);
3000
3001 Node* _notify_jvmti_addr = makecon(TypeRawPtr::make((address)MountUnmountDisabler::notify_jvmti_events_address()));
3002 Node* _notify_jvmti = ideal.load(ideal.ctrl(), _notify_jvmti_addr, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw);
3003
3004 ideal.if_then(_notify_jvmti, BoolTest::eq, ideal.ConI(1)); {
3005 sync_kit(ideal);
3006 Node* is_mount = is_first_transition ? ideal.ConI(1) : argument(1);
3007 const TypeFunc* tf = OptoRuntime::vthread_transition_Type();
3008 make_runtime_call(RC_NO_LEAF, tf, funcAddr, funcName, TypePtr::BOTTOM, vt_oop, is_mount);
3009 ideal.sync_kit(this);
3010 } ideal.else_(); {
3011 Node* thread = ideal.thread();
3012 Node* jt_addr = off_heap_plus_addr(thread, in_bytes(JavaThread::is_in_vthread_transition_offset()));
3013 Node* vt_addr = basic_plus_adr(vt_oop, java_lang_Thread::is_in_vthread_transition_offset());
3014
3015 sync_kit(ideal);
3016 access_store_at(nullptr, jt_addr, _gvn.type(jt_addr)->is_ptr(), ideal.ConI(0), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
3017 access_store_at(nullptr, vt_addr, _gvn.type(vt_addr)->is_ptr(), ideal.ConI(0), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
3018 ideal.sync_kit(this);
3019 } ideal.end_if();
3020
3021 final_sync(ideal);
3022 return true;
3023 }
3024
3025 #if INCLUDE_JVMTI
3026
3027 // Always update the is_disable_suspend bit.
3028 bool LibraryCallKit::inline_native_notify_jvmti_sync() {
3029 if (!DoJVMTIVirtualThreadTransitions) {
3030 return true;
3031 }
3032 IdealKit ideal(this);
3033
3034 {
3035 // unconditionally update the is_disable_suspend bit in current JavaThread
3036 Node* thread = ideal.thread();
3037 Node* arg = argument(0); // argument for notification
3038 Node* addr = off_heap_plus_addr(thread, in_bytes(JavaThread::is_disable_suspend_offset()));
3039 const TypePtr *addr_type = _gvn.type(addr)->isa_ptr();
3040
3041 sync_kit(ideal);
3042 access_store_at(nullptr, addr, addr_type, arg, _gvn.type(arg), T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
3043 ideal.sync_kit(this);
3044 }
3045 final_sync(ideal);
3046
3047 return true;
3048 }
3049
3050 #endif // INCLUDE_JVMTI
3051
3052 #ifdef JFR_HAVE_INTRINSICS
3053
3054 /**
3055 * if oop->klass != null
3056 * // normal class
3057 * epoch = _epoch_state ? 2 : 1
3058 * if oop->klass->trace_id & ((epoch << META_SHIFT) | epoch)) != epoch {
3059 * ... // enter slow path when the klass is first recorded or the epoch of JFR shifts
3060 * }
3061 * id = oop->klass->trace_id >> TRACE_ID_SHIFT // normal class path
3062 * else
3063 * // primitive class
3064 * if oop->array_klass != null
3065 * id = (oop->array_klass->trace_id >> TRACE_ID_SHIFT) + 1 // primitive class path
3066 * else
3067 * id = LAST_TYPE_ID + 1 // void class path
3068 * if (!signaled)
3069 * signaled = true
3070 */
3071 bool LibraryCallKit::inline_native_classID() {
3072 Node* cls = argument(0);
3073
3074 IdealKit ideal(this);
3075 #define __ ideal.
3076 IdealVariable result(ideal); __ declarations_done();
3077 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(),
3078 basic_plus_adr(cls, java_lang_Class::klass_offset()),
3079 TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
3080
3081
3082 __ if_then(kls, BoolTest::ne, null()); {
3083 Node* kls_trace_id_addr = basic_plus_adr(kls, in_bytes(KLASS_TRACE_ID_OFFSET));
3084 Node* kls_trace_id_raw = ideal.load(ideal.ctrl(), kls_trace_id_addr,TypeLong::LONG, T_LONG, Compile::AliasIdxRaw);
3085
3086 Node* epoch_address = makecon(TypeRawPtr::make(JfrIntrinsicSupport::epoch_address()));
3087 Node* epoch = ideal.load(ideal.ctrl(), epoch_address, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw);
3088 epoch = _gvn.transform(new LShiftLNode(longcon(1), epoch));
3089 Node* mask = _gvn.transform(new LShiftLNode(epoch, intcon(META_SHIFT)));
3090 mask = _gvn.transform(new OrLNode(mask, epoch));
3091 Node* kls_trace_id_raw_and_mask = _gvn.transform(new AndLNode(kls_trace_id_raw, mask));
3092
3093 float unlikely = PROB_UNLIKELY(0.999);
3094 __ if_then(kls_trace_id_raw_and_mask, BoolTest::ne, epoch, unlikely); {
3095 sync_kit(ideal);
3096 make_runtime_call(RC_LEAF,
3097 OptoRuntime::class_id_load_barrier_Type(),
3098 CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::load_barrier),
3099 "class id load barrier",
3100 TypePtr::BOTTOM,
3101 kls);
3102 ideal.sync_kit(this);
3103 } __ end_if();
3104
3105 ideal.set(result, _gvn.transform(new URShiftLNode(kls_trace_id_raw, ideal.ConI(TRACE_ID_SHIFT))));
3106 } __ else_(); {
3107 Node* array_kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(),
3108 basic_plus_adr(cls, java_lang_Class::array_klass_offset()),
3109 TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
3110 __ if_then(array_kls, BoolTest::ne, null()); {
3111 Node* array_kls_trace_id_addr = basic_plus_adr(array_kls, in_bytes(KLASS_TRACE_ID_OFFSET));
3112 Node* array_kls_trace_id_raw = ideal.load(ideal.ctrl(), array_kls_trace_id_addr, TypeLong::LONG, T_LONG, Compile::AliasIdxRaw);
3113 Node* array_kls_trace_id = _gvn.transform(new URShiftLNode(array_kls_trace_id_raw, ideal.ConI(TRACE_ID_SHIFT)));
3114 ideal.set(result, _gvn.transform(new AddLNode(array_kls_trace_id, longcon(1))));
3115 } __ else_(); {
3116 // void class case
3117 ideal.set(result, longcon(LAST_TYPE_ID + 1));
3118 } __ end_if();
3119
3120 Node* signaled_flag_address = makecon(TypeRawPtr::make(JfrIntrinsicSupport::signal_address()));
3121 Node* signaled = ideal.load(ideal.ctrl(), signaled_flag_address, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw, true, MemNode::acquire);
3122 __ if_then(signaled, BoolTest::ne, ideal.ConI(1)); {
3123 ideal.store(ideal.ctrl(), signaled_flag_address, ideal.ConI(1), T_BOOLEAN, Compile::AliasIdxRaw, MemNode::release, true);
3124 } __ end_if();
3125 } __ end_if();
3126
3127 final_sync(ideal);
3128 set_result(ideal.value(result));
3129 #undef __
3130 return true;
3131 }
3132
3133 //------------------------inline_native_jvm_commit------------------
3134 bool LibraryCallKit::inline_native_jvm_commit() {
3135 enum { _true_path = 1, _false_path = 2, PATH_LIMIT };
3136
3137 // Save input memory and i_o state.
3138 Node* input_memory_state = reset_memory();
3139 set_all_memory(input_memory_state);
3140 Node* input_io_state = i_o();
3141
3142 // TLS.
3143 Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
3144 // Jfr java buffer.
3145 Node* java_buffer_offset = _gvn.transform(AddPNode::make_off_heap(tls_ptr, MakeConX(in_bytes(JAVA_BUFFER_OFFSET_JFR))));
3146 Node* java_buffer = _gvn.transform(new LoadPNode(control(), input_memory_state, java_buffer_offset, TypePtr::BOTTOM, TypeRawPtr::NOTNULL, MemNode::unordered));
3147 Node* java_buffer_pos_offset = _gvn.transform(AddPNode::make_off_heap(java_buffer, MakeConX(in_bytes(JFR_BUFFER_POS_OFFSET))));
3148
3149 // Load the current value of the notified field in the JfrThreadLocal.
3150 Node* notified_offset = off_heap_plus_addr(tls_ptr, in_bytes(NOTIFY_OFFSET_JFR));
3151 Node* notified = make_load(control(), notified_offset, TypeInt::BOOL, T_BOOLEAN, MemNode::unordered);
3152
3153 // Test for notification.
3154 Node* notified_cmp = _gvn.transform(new CmpINode(notified, _gvn.intcon(1)));
3155 Node* test_notified = _gvn.transform(new BoolNode(notified_cmp, BoolTest::eq));
3156 IfNode* iff_notified = create_and_map_if(control(), test_notified, PROB_MIN, COUNT_UNKNOWN);
3157
3158 // True branch, is notified.
3159 Node* is_notified = _gvn.transform(new IfTrueNode(iff_notified));
3160 set_control(is_notified);
3161
3162 // Reset notified state.
3163 store_to_memory(control(), notified_offset, _gvn.intcon(0), T_BOOLEAN, MemNode::unordered);
3164 Node* notified_reset_memory = reset_memory();
3165
3166 // Iff notified, the return address of the commit method is the current position of the backing java buffer. This is used to reset the event writer.
3167 Node* current_pos_X = _gvn.transform(new LoadXNode(control(), input_memory_state, java_buffer_pos_offset, TypeRawPtr::NOTNULL, TypeX_X, MemNode::unordered));
3168 // Convert the machine-word to a long.
3169 Node* current_pos = ConvX2L(current_pos_X);
3170
3171 // False branch, not notified.
3172 Node* not_notified = _gvn.transform(new IfFalseNode(iff_notified));
3173 set_control(not_notified);
3174 set_all_memory(input_memory_state);
3175
3176 // Arg is the next position as a long.
3177 Node* arg = argument(0);
3178 // Convert long to machine-word.
3179 Node* next_pos_X = ConvL2X(arg);
3180
3181 // Store the next_position to the underlying jfr java buffer.
3182 store_to_memory(control(), java_buffer_pos_offset, next_pos_X, LP64_ONLY(T_LONG) NOT_LP64(T_INT), MemNode::release);
3183
3184 Node* commit_memory = reset_memory();
3185 set_all_memory(commit_memory);
3186
3187 // Now load the flags from off the java buffer and decide if the buffer is a lease. If so, it needs to be returned post-commit.
3188 Node* java_buffer_flags_offset = _gvn.transform(AddPNode::make_off_heap(java_buffer, MakeConX(in_bytes(JFR_BUFFER_FLAGS_OFFSET))));
3189 Node* flags = make_load(control(), java_buffer_flags_offset, TypeInt::UBYTE, T_BYTE, MemNode::unordered);
3190 Node* lease_constant = _gvn.intcon(4);
3191
3192 // And flags with lease constant.
3193 Node* lease = _gvn.transform(new AndINode(flags, lease_constant));
3194
3195 // Branch on lease to conditionalize returning the leased java buffer.
3196 Node* lease_cmp = _gvn.transform(new CmpINode(lease, lease_constant));
3197 Node* test_lease = _gvn.transform(new BoolNode(lease_cmp, BoolTest::eq));
3198 IfNode* iff_lease = create_and_map_if(control(), test_lease, PROB_MIN, COUNT_UNKNOWN);
3199
3200 // False branch, not a lease.
3201 Node* not_lease = _gvn.transform(new IfFalseNode(iff_lease));
3202
3203 // True branch, is lease.
3204 Node* is_lease = _gvn.transform(new IfTrueNode(iff_lease));
3205 set_control(is_lease);
3206
3207 // Make a runtime call, which can safepoint, to return the leased buffer. This updates both the JfrThreadLocal and the Java event writer oop.
3208 Node* call_return_lease = make_runtime_call(RC_NO_LEAF,
3209 OptoRuntime::void_void_Type(),
3210 SharedRuntime::jfr_return_lease(),
3211 "return_lease", TypePtr::BOTTOM);
3212 Node* call_return_lease_control = _gvn.transform(new ProjNode(call_return_lease, TypeFunc::Control));
3213
3214 RegionNode* lease_compare_rgn = new RegionNode(PATH_LIMIT);
3215 record_for_igvn(lease_compare_rgn);
3216 PhiNode* lease_compare_mem = new PhiNode(lease_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
3217 record_for_igvn(lease_compare_mem);
3218 PhiNode* lease_compare_io = new PhiNode(lease_compare_rgn, Type::ABIO);
3219 record_for_igvn(lease_compare_io);
3220 PhiNode* lease_result_value = new PhiNode(lease_compare_rgn, TypeLong::LONG);
3221 record_for_igvn(lease_result_value);
3222
3223 // Update control and phi nodes.
3224 lease_compare_rgn->init_req(_true_path, call_return_lease_control);
3225 lease_compare_rgn->init_req(_false_path, not_lease);
3226
3227 lease_compare_mem->init_req(_true_path, reset_memory());
3228 lease_compare_mem->init_req(_false_path, commit_memory);
3229
3230 lease_compare_io->init_req(_true_path, i_o());
3231 lease_compare_io->init_req(_false_path, input_io_state);
3232
3233 lease_result_value->init_req(_true_path, _gvn.longcon(0)); // if the lease was returned, return 0L.
3234 lease_result_value->init_req(_false_path, arg); // if not lease, return new updated position.
3235
3236 RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
3237 PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
3238 PhiNode* result_io = new PhiNode(result_rgn, Type::ABIO);
3239 PhiNode* result_value = new PhiNode(result_rgn, TypeLong::LONG);
3240
3241 // Update control and phi nodes.
3242 result_rgn->init_req(_true_path, is_notified);
3243 result_rgn->init_req(_false_path, _gvn.transform(lease_compare_rgn));
3244
3245 result_mem->init_req(_true_path, notified_reset_memory);
3246 result_mem->init_req(_false_path, _gvn.transform(lease_compare_mem));
3247
3248 result_io->init_req(_true_path, input_io_state);
3249 result_io->init_req(_false_path, _gvn.transform(lease_compare_io));
3250
3251 result_value->init_req(_true_path, current_pos);
3252 result_value->init_req(_false_path, _gvn.transform(lease_result_value));
3253
3254 // Set output state.
3255 set_control(_gvn.transform(result_rgn));
3256 set_all_memory(_gvn.transform(result_mem));
3257 set_i_o(_gvn.transform(result_io));
3258 set_result(result_rgn, result_value);
3259 return true;
3260 }
3261
3262 /*
3263 * The intrinsic is a model of this pseudo-code:
3264 *
3265 * JfrThreadLocal* const tl = Thread::jfr_thread_local()
3266 * jobject h_event_writer = tl->java_event_writer();
3267 * if (h_event_writer == nullptr) {
3268 * return nullptr;
3269 * }
3270 * oop threadObj = Thread::threadObj();
3271 * oop vthread = java_lang_Thread::vthread(threadObj);
3272 * traceid tid;
3273 * bool pinVirtualThread;
3274 * bool excluded;
3275 * if (vthread != threadObj) { // i.e. current thread is virtual
3276 * tid = java_lang_Thread::tid(vthread);
3277 * u2 vthread_epoch_raw = java_lang_Thread::jfr_epoch(vthread);
3278 * pinVirtualThread = VMContinuations;
3279 * excluded = vthread_epoch_raw & excluded_mask;
3280 * if (!excluded) {
3281 * traceid current_epoch = JfrTraceIdEpoch::current_generation();
3282 * u2 vthread_epoch = vthread_epoch_raw & epoch_mask;
3283 * if (vthread_epoch != current_epoch) {
3284 * write_checkpoint();
3285 * }
3286 * }
3287 * } else {
3288 * tid = java_lang_Thread::tid(threadObj);
3289 * u2 thread_epoch_raw = java_lang_Thread::jfr_epoch(threadObj);
3290 * pinVirtualThread = false;
3291 * excluded = thread_epoch_raw & excluded_mask;
3292 * }
3293 * oop event_writer = JNIHandles::resolve_non_null(h_event_writer);
3294 * traceid tid_in_event_writer = getField(event_writer, "threadID");
3295 * if (tid_in_event_writer != tid) {
3296 * setField(event_writer, "pinVirtualThread", pinVirtualThread);
3297 * setField(event_writer, "excluded", excluded);
3298 * setField(event_writer, "threadID", tid);
3299 * }
3300 * return event_writer
3301 */
3302 bool LibraryCallKit::inline_native_getEventWriter() {
3303 enum { _true_path = 1, _false_path = 2, PATH_LIMIT };
3304
3305 // Save input memory and i_o state.
3306 Node* input_memory_state = reset_memory();
3307 set_all_memory(input_memory_state);
3308 Node* input_io_state = i_o();
3309
3310 // The most significant bit of the u2 is used to denote thread exclusion
3311 Node* excluded_shift = _gvn.intcon(15);
3312 Node* excluded_mask = _gvn.intcon(1 << 15);
3313 // The epoch generation is the range [1-32767]
3314 Node* epoch_mask = _gvn.intcon(32767);
3315
3316 // TLS
3317 Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
3318
3319 // Load the address of java event writer jobject handle from the jfr_thread_local structure.
3320 Node* jobj_ptr = off_heap_plus_addr(tls_ptr, in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR));
3321
3322 // Load the eventwriter jobject handle.
3323 Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
3324
3325 // Null check the jobject handle.
3326 Node* jobj_cmp_null = _gvn.transform(new CmpPNode(jobj, null()));
3327 Node* test_jobj_not_equal_null = _gvn.transform(new BoolNode(jobj_cmp_null, BoolTest::ne));
3328 IfNode* iff_jobj_not_equal_null = create_and_map_if(control(), test_jobj_not_equal_null, PROB_MAX, COUNT_UNKNOWN);
3329
3330 // False path, jobj is null.
3331 Node* jobj_is_null = _gvn.transform(new IfFalseNode(iff_jobj_not_equal_null));
3332
3333 // True path, jobj is not null.
3334 Node* jobj_is_not_null = _gvn.transform(new IfTrueNode(iff_jobj_not_equal_null));
3335
3336 set_control(jobj_is_not_null);
3337
3338 // Load the threadObj for the CarrierThread.
3339 Node* threadObj = generate_current_thread(tls_ptr);
3340
3341 // Load the vthread.
3342 Node* vthread = generate_virtual_thread(tls_ptr);
3343
3344 // If vthread != threadObj, this is a virtual thread.
3345 Node* vthread_cmp_threadObj = _gvn.transform(new CmpPNode(vthread, threadObj));
3346 Node* test_vthread_not_equal_threadObj = _gvn.transform(new BoolNode(vthread_cmp_threadObj, BoolTest::ne));
3347 IfNode* iff_vthread_not_equal_threadObj =
3348 create_and_map_if(jobj_is_not_null, test_vthread_not_equal_threadObj, PROB_FAIR, COUNT_UNKNOWN);
3349
3350 // False branch, fallback to threadObj.
3351 Node* vthread_equal_threadObj = _gvn.transform(new IfFalseNode(iff_vthread_not_equal_threadObj));
3352 set_control(vthread_equal_threadObj);
3353
3354 // Load the tid field from the vthread object.
3355 Node* thread_obj_tid = load_field_from_object(threadObj, "tid", "J");
3356
3357 // Load the raw epoch value from the threadObj.
3358 Node* threadObj_epoch_offset = basic_plus_adr(threadObj, java_lang_Thread::jfr_epoch_offset());
3359 Node* threadObj_epoch_raw = access_load_at(threadObj, threadObj_epoch_offset,
3360 _gvn.type(threadObj_epoch_offset)->isa_ptr(),
3361 TypeInt::CHAR, T_CHAR,
3362 IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD);
3363
3364 // Mask off the excluded information from the epoch.
3365 Node * threadObj_is_excluded = _gvn.transform(new AndINode(threadObj_epoch_raw, excluded_mask));
3366
3367 // True branch, this is a virtual thread.
3368 Node* vthread_not_equal_threadObj = _gvn.transform(new IfTrueNode(iff_vthread_not_equal_threadObj));
3369 set_control(vthread_not_equal_threadObj);
3370
3371 // Load the tid field from the vthread object.
3372 Node* vthread_tid = load_field_from_object(vthread, "tid", "J");
3373
3374 // Continuation support determines if a virtual thread should be pinned.
3375 Node* global_addr = makecon(TypeRawPtr::make((address)&VMContinuations));
3376 Node* continuation_support = make_load(control(), global_addr, TypeInt::BOOL, T_BOOLEAN, MemNode::unordered);
3377
3378 // Load the raw epoch value from the vthread.
3379 Node* vthread_epoch_offset = basic_plus_adr(vthread, java_lang_Thread::jfr_epoch_offset());
3380 Node* vthread_epoch_raw = access_load_at(vthread, vthread_epoch_offset, _gvn.type(vthread_epoch_offset)->is_ptr(),
3381 TypeInt::CHAR, T_CHAR,
3382 IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD);
3383
3384 // Mask off the excluded information from the epoch.
3385 Node * vthread_is_excluded = _gvn.transform(new AndINode(vthread_epoch_raw, excluded_mask));
3386
3387 // Branch on excluded to conditionalize updating the epoch for the virtual thread.
3388 Node* is_excluded_cmp = _gvn.transform(new CmpINode(vthread_is_excluded, excluded_mask));
3389 Node* test_not_excluded = _gvn.transform(new BoolNode(is_excluded_cmp, BoolTest::ne));
3390 IfNode* iff_not_excluded = create_and_map_if(control(), test_not_excluded, PROB_MAX, COUNT_UNKNOWN);
3391
3392 // False branch, vthread is excluded, no need to write epoch info.
3393 Node* excluded = _gvn.transform(new IfFalseNode(iff_not_excluded));
3394
3395 // True branch, vthread is included, update epoch info.
3396 Node* included = _gvn.transform(new IfTrueNode(iff_not_excluded));
3397 set_control(included);
3398
3399 // Get epoch value.
3400 Node* epoch = _gvn.transform(new AndINode(vthread_epoch_raw, epoch_mask));
3401
3402 // Load the current epoch generation. The value is unsigned 16-bit, so we type it as T_CHAR.
3403 Node* epoch_generation_address = makecon(TypeRawPtr::make(JfrIntrinsicSupport::epoch_generation_address()));
3404 Node* current_epoch_generation = make_load(control(), epoch_generation_address, TypeInt::CHAR, T_CHAR, MemNode::unordered);
3405
3406 // Compare the epoch in the vthread to the current epoch generation.
3407 Node* const epoch_cmp = _gvn.transform(new CmpUNode(current_epoch_generation, epoch));
3408 Node* test_epoch_not_equal = _gvn.transform(new BoolNode(epoch_cmp, BoolTest::ne));
3409 IfNode* iff_epoch_not_equal = create_and_map_if(control(), test_epoch_not_equal, PROB_FAIR, COUNT_UNKNOWN);
3410
3411 // False path, epoch is equal, checkpoint information is valid.
3412 Node* epoch_is_equal = _gvn.transform(new IfFalseNode(iff_epoch_not_equal));
3413
3414 // True path, epoch is not equal, write a checkpoint for the vthread.
3415 Node* epoch_is_not_equal = _gvn.transform(new IfTrueNode(iff_epoch_not_equal));
3416
3417 set_control(epoch_is_not_equal);
3418
3419 // Make a runtime call, which can safepoint, to write a checkpoint for the vthread for this epoch.
3420 // The call also updates the native thread local thread id and the vthread with the current epoch.
3421 Node* call_write_checkpoint = make_runtime_call(RC_NO_LEAF,
3422 OptoRuntime::jfr_write_checkpoint_Type(),
3423 SharedRuntime::jfr_write_checkpoint(),
3424 "write_checkpoint", TypePtr::BOTTOM);
3425 Node* call_write_checkpoint_control = _gvn.transform(new ProjNode(call_write_checkpoint, TypeFunc::Control));
3426
3427 // vthread epoch != current epoch
3428 RegionNode* epoch_compare_rgn = new RegionNode(PATH_LIMIT);
3429 record_for_igvn(epoch_compare_rgn);
3430 PhiNode* epoch_compare_mem = new PhiNode(epoch_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
3431 record_for_igvn(epoch_compare_mem);
3432 PhiNode* epoch_compare_io = new PhiNode(epoch_compare_rgn, Type::ABIO);
3433 record_for_igvn(epoch_compare_io);
3434
3435 // Update control and phi nodes.
3436 epoch_compare_rgn->init_req(_true_path, call_write_checkpoint_control);
3437 epoch_compare_rgn->init_req(_false_path, epoch_is_equal);
3438 epoch_compare_mem->init_req(_true_path, reset_memory());
3439 epoch_compare_mem->init_req(_false_path, input_memory_state);
3440 epoch_compare_io->init_req(_true_path, i_o());
3441 epoch_compare_io->init_req(_false_path, input_io_state);
3442
3443 // excluded != true
3444 RegionNode* exclude_compare_rgn = new RegionNode(PATH_LIMIT);
3445 record_for_igvn(exclude_compare_rgn);
3446 PhiNode* exclude_compare_mem = new PhiNode(exclude_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
3447 record_for_igvn(exclude_compare_mem);
3448 PhiNode* exclude_compare_io = new PhiNode(exclude_compare_rgn, Type::ABIO);
3449 record_for_igvn(exclude_compare_io);
3450
3451 // Update control and phi nodes.
3452 exclude_compare_rgn->init_req(_true_path, _gvn.transform(epoch_compare_rgn));
3453 exclude_compare_rgn->init_req(_false_path, excluded);
3454 exclude_compare_mem->init_req(_true_path, _gvn.transform(epoch_compare_mem));
3455 exclude_compare_mem->init_req(_false_path, input_memory_state);
3456 exclude_compare_io->init_req(_true_path, _gvn.transform(epoch_compare_io));
3457 exclude_compare_io->init_req(_false_path, input_io_state);
3458
3459 // vthread != threadObj
3460 RegionNode* vthread_compare_rgn = new RegionNode(PATH_LIMIT);
3461 record_for_igvn(vthread_compare_rgn);
3462 PhiNode* vthread_compare_mem = new PhiNode(vthread_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
3463 PhiNode* vthread_compare_io = new PhiNode(vthread_compare_rgn, Type::ABIO);
3464 record_for_igvn(vthread_compare_io);
3465 PhiNode* tid = new PhiNode(vthread_compare_rgn, TypeLong::LONG);
3466 record_for_igvn(tid);
3467 PhiNode* exclusion = new PhiNode(vthread_compare_rgn, TypeInt::CHAR);
3468 record_for_igvn(exclusion);
3469 PhiNode* pinVirtualThread = new PhiNode(vthread_compare_rgn, TypeInt::BOOL);
3470 record_for_igvn(pinVirtualThread);
3471
3472 // Update control and phi nodes.
3473 vthread_compare_rgn->init_req(_true_path, _gvn.transform(exclude_compare_rgn));
3474 vthread_compare_rgn->init_req(_false_path, vthread_equal_threadObj);
3475 vthread_compare_mem->init_req(_true_path, _gvn.transform(exclude_compare_mem));
3476 vthread_compare_mem->init_req(_false_path, input_memory_state);
3477 vthread_compare_io->init_req(_true_path, _gvn.transform(exclude_compare_io));
3478 vthread_compare_io->init_req(_false_path, input_io_state);
3479 tid->init_req(_true_path, vthread_tid);
3480 tid->init_req(_false_path, thread_obj_tid);
3481 exclusion->init_req(_true_path, vthread_is_excluded);
3482 exclusion->init_req(_false_path, threadObj_is_excluded);
3483 pinVirtualThread->init_req(_true_path, continuation_support);
3484 pinVirtualThread->init_req(_false_path, _gvn.intcon(0));
3485
3486 // Update branch state.
3487 set_control(_gvn.transform(vthread_compare_rgn));
3488 set_all_memory(_gvn.transform(vthread_compare_mem));
3489 set_i_o(_gvn.transform(vthread_compare_io));
3490
3491 // Load the event writer oop by dereferencing the jobject handle.
3492 ciKlass* klass_EventWriter = env()->find_system_klass(ciSymbol::make("jdk/jfr/internal/event/EventWriter"));
3493 assert(klass_EventWriter->is_loaded(), "invariant");
3494 ciInstanceKlass* const instklass_EventWriter = klass_EventWriter->as_instance_klass();
3495 const TypeKlassPtr* const aklass = TypeKlassPtr::make(instklass_EventWriter);
3496 const TypeOopPtr* const xtype = aklass->as_instance_type();
3497 Node* jobj_untagged = _gvn.transform(AddPNode::make_off_heap(jobj, _gvn.MakeConX(-JNIHandles::TypeTag::global)));
3498 Node* event_writer = access_load(jobj_untagged, xtype, T_OBJECT, IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
3499
3500 // Load the current thread id from the event writer object.
3501 Node* const event_writer_tid = load_field_from_object(event_writer, "threadID", "J");
3502 // Get the field offset to, conditionally, store an updated tid value later.
3503 Node* const event_writer_tid_field = field_address_from_object(event_writer, "threadID", "J", false);
3504 // Get the field offset to, conditionally, store an updated exclusion value later.
3505 Node* const event_writer_excluded_field = field_address_from_object(event_writer, "excluded", "Z", false);
3506 // Get the field offset to, conditionally, store an updated pinVirtualThread value later.
3507 Node* const event_writer_pin_field = field_address_from_object(event_writer, "pinVirtualThread", "Z", false);
3508
3509 RegionNode* event_writer_tid_compare_rgn = new RegionNode(PATH_LIMIT);
3510 record_for_igvn(event_writer_tid_compare_rgn);
3511 PhiNode* event_writer_tid_compare_mem = new PhiNode(event_writer_tid_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
3512 record_for_igvn(event_writer_tid_compare_mem);
3513 PhiNode* event_writer_tid_compare_io = new PhiNode(event_writer_tid_compare_rgn, Type::ABIO);
3514 record_for_igvn(event_writer_tid_compare_io);
3515
3516 // Compare the current tid from the thread object to what is currently stored in the event writer object.
3517 Node* const tid_cmp = _gvn.transform(new CmpLNode(event_writer_tid, _gvn.transform(tid)));
3518 Node* test_tid_not_equal = _gvn.transform(new BoolNode(tid_cmp, BoolTest::ne));
3519 IfNode* iff_tid_not_equal = create_and_map_if(_gvn.transform(vthread_compare_rgn), test_tid_not_equal, PROB_FAIR, COUNT_UNKNOWN);
3520
3521 // False path, tids are the same.
3522 Node* tid_is_equal = _gvn.transform(new IfFalseNode(iff_tid_not_equal));
3523
3524 // True path, tid is not equal, need to update the tid in the event writer.
3525 Node* tid_is_not_equal = _gvn.transform(new IfTrueNode(iff_tid_not_equal));
3526 record_for_igvn(tid_is_not_equal);
3527
3528 // Store the pin state to the event writer.
3529 store_to_memory(tid_is_not_equal, event_writer_pin_field, _gvn.transform(pinVirtualThread), T_BOOLEAN, MemNode::unordered);
3530
3531 // Store the exclusion state to the event writer.
3532 Node* excluded_bool = _gvn.transform(new URShiftINode(_gvn.transform(exclusion), excluded_shift));
3533 store_to_memory(tid_is_not_equal, event_writer_excluded_field, excluded_bool, T_BOOLEAN, MemNode::unordered);
3534
3535 // Store the tid to the event writer.
3536 store_to_memory(tid_is_not_equal, event_writer_tid_field, tid, T_LONG, MemNode::unordered);
3537
3538 // Update control and phi nodes.
3539 event_writer_tid_compare_rgn->init_req(_true_path, tid_is_not_equal);
3540 event_writer_tid_compare_rgn->init_req(_false_path, tid_is_equal);
3541 event_writer_tid_compare_mem->init_req(_true_path, reset_memory());
3542 event_writer_tid_compare_mem->init_req(_false_path, _gvn.transform(vthread_compare_mem));
3543 event_writer_tid_compare_io->init_req(_true_path, i_o());
3544 event_writer_tid_compare_io->init_req(_false_path, _gvn.transform(vthread_compare_io));
3545
3546 // Result of top level CFG, Memory, IO and Value.
3547 RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
3548 PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
3549 PhiNode* result_io = new PhiNode(result_rgn, Type::ABIO);
3550 PhiNode* result_value = new PhiNode(result_rgn, TypeInstPtr::BOTTOM);
3551
3552 // Result control.
3553 result_rgn->init_req(_true_path, _gvn.transform(event_writer_tid_compare_rgn));
3554 result_rgn->init_req(_false_path, jobj_is_null);
3555
3556 // Result memory.
3557 result_mem->init_req(_true_path, _gvn.transform(event_writer_tid_compare_mem));
3558 result_mem->init_req(_false_path, input_memory_state);
3559
3560 // Result IO.
3561 result_io->init_req(_true_path, _gvn.transform(event_writer_tid_compare_io));
3562 result_io->init_req(_false_path, input_io_state);
3563
3564 // Result value.
3565 result_value->init_req(_true_path, event_writer); // return event writer oop
3566 result_value->init_req(_false_path, null()); // return null
3567
3568 // Set output state.
3569 set_control(_gvn.transform(result_rgn));
3570 set_all_memory(_gvn.transform(result_mem));
3571 set_i_o(_gvn.transform(result_io));
3572 set_result(result_rgn, result_value);
3573 return true;
3574 }
3575
3576 /*
3577 * The intrinsic is a model of this pseudo-code:
3578 *
3579 * JfrThreadLocal* const tl = thread->jfr_thread_local();
3580 * if (carrierThread != thread) { // is virtual thread
3581 * const u2 vthread_epoch_raw = java_lang_Thread::jfr_epoch(thread);
3582 * bool excluded = vthread_epoch_raw & excluded_mask;
3583 * AtomicAccess::store(&tl->_contextual_tid, java_lang_Thread::tid(thread));
3584 * AtomicAccess::store(&tl->_contextual_thread_excluded, is_excluded);
3585 * if (!excluded) {
3586 * const u2 vthread_epoch = vthread_epoch_raw & epoch_mask;
3587 * AtomicAccess::store(&tl->_vthread_epoch, vthread_epoch);
3588 * }
3589 * AtomicAccess::release_store(&tl->_vthread, true);
3590 * return;
3591 * }
3592 * AtomicAccess::release_store(&tl->_vthread, false);
3593 */
3594 void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) {
3595 enum { _true_path = 1, _false_path = 2, PATH_LIMIT };
3596
3597 Node* input_memory_state = reset_memory();
3598 set_all_memory(input_memory_state);
3599
3600 // The most significant bit of the u2 is used to denote thread exclusion
3601 Node* excluded_mask = _gvn.intcon(1 << 15);
3602 // The epoch generation is the range [1-32767]
3603 Node* epoch_mask = _gvn.intcon(32767);
3604
3605 Node* const carrierThread = generate_current_thread(jt);
3606 // If thread != carrierThread, this is a virtual thread.
3607 Node* thread_cmp_carrierThread = _gvn.transform(new CmpPNode(thread, carrierThread));
3608 Node* test_thread_not_equal_carrierThread = _gvn.transform(new BoolNode(thread_cmp_carrierThread, BoolTest::ne));
3609 IfNode* iff_thread_not_equal_carrierThread =
3610 create_and_map_if(control(), test_thread_not_equal_carrierThread, PROB_FAIR, COUNT_UNKNOWN);
3611
3612 Node* vthread_offset = off_heap_plus_addr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_OFFSET_JFR));
3613
3614 // False branch, is carrierThread.
3615 Node* thread_equal_carrierThread = _gvn.transform(new IfFalseNode(iff_thread_not_equal_carrierThread));
3616 // Store release
3617 Node* vthread_false_memory = store_to_memory(thread_equal_carrierThread, vthread_offset, _gvn.intcon(0), T_BOOLEAN, MemNode::release, true);
3618
3619 set_all_memory(input_memory_state);
3620
3621 // True branch, is virtual thread.
3622 Node* thread_not_equal_carrierThread = _gvn.transform(new IfTrueNode(iff_thread_not_equal_carrierThread));
3623 set_control(thread_not_equal_carrierThread);
3624
3625 // Load the raw epoch value from the vthread.
3626 Node* epoch_offset = basic_plus_adr(thread, java_lang_Thread::jfr_epoch_offset());
3627 Node* epoch_raw = access_load_at(thread, epoch_offset, _gvn.type(epoch_offset)->is_ptr(), TypeInt::CHAR, T_CHAR,
3628 IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD);
3629
3630 // Mask off the excluded information from the epoch.
3631 Node * const is_excluded = _gvn.transform(new AndINode(epoch_raw, excluded_mask));
3632
3633 // Load the tid field from the thread.
3634 Node* tid = load_field_from_object(thread, "tid", "J");
3635
3636 // Store the vthread tid to the jfr thread local.
3637 Node* thread_id_offset = off_heap_plus_addr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_ID_OFFSET_JFR));
3638 Node* tid_memory = store_to_memory(control(), thread_id_offset, tid, T_LONG, MemNode::unordered, true);
3639
3640 // Branch is_excluded to conditionalize updating the epoch .
3641 Node* excluded_cmp = _gvn.transform(new CmpINode(is_excluded, excluded_mask));
3642 Node* test_excluded = _gvn.transform(new BoolNode(excluded_cmp, BoolTest::eq));
3643 IfNode* iff_excluded = create_and_map_if(control(), test_excluded, PROB_MIN, COUNT_UNKNOWN);
3644
3645 // True branch, vthread is excluded, no need to write epoch info.
3646 Node* excluded = _gvn.transform(new IfTrueNode(iff_excluded));
3647 set_control(excluded);
3648 Node* vthread_is_excluded = _gvn.intcon(1);
3649
3650 // False branch, vthread is included, update epoch info.
3651 Node* included = _gvn.transform(new IfFalseNode(iff_excluded));
3652 set_control(included);
3653 Node* vthread_is_included = _gvn.intcon(0);
3654
3655 // Get epoch value.
3656 Node* epoch = _gvn.transform(new AndINode(epoch_raw, epoch_mask));
3657
3658 // Store the vthread epoch to the jfr thread local.
3659 Node* vthread_epoch_offset = off_heap_plus_addr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EPOCH_OFFSET_JFR));
3660 Node* included_memory = store_to_memory(control(), vthread_epoch_offset, epoch, T_CHAR, MemNode::unordered, true);
3661
3662 RegionNode* excluded_rgn = new RegionNode(PATH_LIMIT);
3663 record_for_igvn(excluded_rgn);
3664 PhiNode* excluded_mem = new PhiNode(excluded_rgn, Type::MEMORY, TypePtr::BOTTOM);
3665 record_for_igvn(excluded_mem);
3666 PhiNode* exclusion = new PhiNode(excluded_rgn, TypeInt::BOOL);
3667 record_for_igvn(exclusion);
3668
3669 // Merge the excluded control and memory.
3670 excluded_rgn->init_req(_true_path, excluded);
3671 excluded_rgn->init_req(_false_path, included);
3672 excluded_mem->init_req(_true_path, tid_memory);
3673 excluded_mem->init_req(_false_path, included_memory);
3674 exclusion->init_req(_true_path, vthread_is_excluded);
3675 exclusion->init_req(_false_path, vthread_is_included);
3676
3677 // Set intermediate state.
3678 set_control(_gvn.transform(excluded_rgn));
3679 set_all_memory(excluded_mem);
3680
3681 // Store the vthread exclusion state to the jfr thread local.
3682 Node* thread_local_excluded_offset = off_heap_plus_addr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EXCLUDED_OFFSET_JFR));
3683 store_to_memory(control(), thread_local_excluded_offset, _gvn.transform(exclusion), T_BOOLEAN, MemNode::unordered, true);
3684
3685 // Store release
3686 Node * vthread_true_memory = store_to_memory(control(), vthread_offset, _gvn.intcon(1), T_BOOLEAN, MemNode::release, true);
3687
3688 RegionNode* thread_compare_rgn = new RegionNode(PATH_LIMIT);
3689 record_for_igvn(thread_compare_rgn);
3690 PhiNode* thread_compare_mem = new PhiNode(thread_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
3691 record_for_igvn(thread_compare_mem);
3692 PhiNode* vthread = new PhiNode(thread_compare_rgn, TypeInt::BOOL);
3693 record_for_igvn(vthread);
3694
3695 // Merge the thread_compare control and memory.
3696 thread_compare_rgn->init_req(_true_path, control());
3697 thread_compare_rgn->init_req(_false_path, thread_equal_carrierThread);
3698 thread_compare_mem->init_req(_true_path, vthread_true_memory);
3699 thread_compare_mem->init_req(_false_path, vthread_false_memory);
3700
3701 // Set output state.
3702 set_control(_gvn.transform(thread_compare_rgn));
3703 set_all_memory(_gvn.transform(thread_compare_mem));
3704 }
3705
3706 #endif // JFR_HAVE_INTRINSICS
3707
3708 //------------------------inline_native_currentCarrierThread------------------
3709 bool LibraryCallKit::inline_native_currentCarrierThread() {
3710 Node* junk = nullptr;
3711 set_result(generate_current_thread(junk));
3712 return true;
3713 }
3714
3715 //------------------------inline_native_currentThread------------------
3716 bool LibraryCallKit::inline_native_currentThread() {
3717 Node* junk = nullptr;
3718 set_result(generate_virtual_thread(junk));
3719 return true;
3720 }
3721
3722 //------------------------inline_native_setVthread------------------
3723 bool LibraryCallKit::inline_native_setCurrentThread() {
3724 assert(C->method()->changes_current_thread(),
3725 "method changes current Thread but is not annotated ChangesCurrentThread");
3726 Node* arr = argument(1);
3727 Node* thread = _gvn.transform(new ThreadLocalNode());
3728 Node* p = off_heap_plus_addr(thread, in_bytes(JavaThread::vthread_offset()));
3729 Node* thread_obj_handle
3730 = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3731 const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3732 access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3733
3734 // Change the _monitor_owner_id of the JavaThread
3735 Node* tid = load_field_from_object(arr, "tid", "J");
3736 Node* monitor_owner_id_offset = off_heap_plus_addr(thread, in_bytes(JavaThread::monitor_owner_id_offset()));
3737 store_to_memory(control(), monitor_owner_id_offset, tid, T_LONG, MemNode::unordered, true);
3738
3739 JFR_ONLY(extend_setCurrentThread(thread, arr);)
3740 return true;
3741 }
3742
3743 const Type* LibraryCallKit::scopedValueCache_type() {
3744 ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3745 const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3746 const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3747
3748 // Because we create the scopedValue cache lazily we have to make the
3749 // type of the result BotPTR.
3750 bool xk = etype->klass_is_exact();
3751 const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3752 return objects_type;
3753 }
3754
3755 Node* LibraryCallKit::scopedValueCache_helper() {
3756 Node* thread = _gvn.transform(new ThreadLocalNode());
3757 Node* p = off_heap_plus_addr(thread, in_bytes(JavaThread::scopedValueCache_offset()));
3758 // We cannot use immutable_memory() because we might flip onto a
3759 // different carrier thread, at which point we'll need to use that
3760 // carrier thread's cache.
3761 // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3762 // TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3763 return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3764 }
3765
3766 //------------------------inline_native_scopedValueCache------------------
3767 bool LibraryCallKit::inline_native_scopedValueCache() {
3768 Node* cache_obj_handle = scopedValueCache_helper();
3769 const Type* objects_type = scopedValueCache_type();
3770 set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3771
3772 return true;
3773 }
3774
3775 //------------------------inline_native_setScopedValueCache------------------
3776 bool LibraryCallKit::inline_native_setScopedValueCache() {
3777 Node* arr = argument(0);
3778 Node* cache_obj_handle = scopedValueCache_helper();
3779 const Type* objects_type = scopedValueCache_type();
3780
3781 const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3782 access_store_at(nullptr, cache_obj_handle, adr_type, arr, objects_type, T_OBJECT, IN_NATIVE | MO_UNORDERED);
3783
3784 return true;
3785 }
3786
3787 //------------------------inline_native_Continuation_pin and unpin-----------
3788
3789 // Shared implementation routine for both pin and unpin.
3790 bool LibraryCallKit::inline_native_Continuation_pinning(bool unpin) {
3791 enum { _true_path = 1, _false_path = 2, PATH_LIMIT };
3792
3793 // Save input memory.
3794 Node* input_memory_state = reset_memory();
3795 set_all_memory(input_memory_state);
3796
3797 // TLS
3798 Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
3799 Node* last_continuation_offset = off_heap_plus_addr(tls_ptr, in_bytes(JavaThread::cont_entry_offset()));
3800 Node* last_continuation = make_load(control(), last_continuation_offset, last_continuation_offset->get_ptr_type(), T_ADDRESS, MemNode::unordered);
3801
3802 // Null check the last continuation object.
3803 Node* continuation_cmp_null = _gvn.transform(new CmpPNode(last_continuation, null()));
3804 Node* test_continuation_not_equal_null = _gvn.transform(new BoolNode(continuation_cmp_null, BoolTest::ne));
3805 IfNode* iff_continuation_not_equal_null = create_and_map_if(control(), test_continuation_not_equal_null, PROB_MAX, COUNT_UNKNOWN);
3806
3807 // False path, last continuation is null.
3808 Node* continuation_is_null = _gvn.transform(new IfFalseNode(iff_continuation_not_equal_null));
3809
3810 // True path, last continuation is not null.
3811 Node* continuation_is_not_null = _gvn.transform(new IfTrueNode(iff_continuation_not_equal_null));
3812
3813 set_control(continuation_is_not_null);
3814
3815 // Load the pin count from the last continuation.
3816 Node* pin_count_offset = off_heap_plus_addr(last_continuation, in_bytes(ContinuationEntry::pin_count_offset()));
3817 Node* pin_count = make_load(control(), pin_count_offset, TypeInt::INT, T_INT, MemNode::unordered);
3818
3819 // The loaded pin count is compared against a context specific rhs for over/underflow detection.
3820 Node* pin_count_rhs;
3821 if (unpin) {
3822 pin_count_rhs = _gvn.intcon(0);
3823 } else {
3824 pin_count_rhs = _gvn.intcon(UINT32_MAX);
3825 }
3826 Node* pin_count_cmp = _gvn.transform(new CmpUNode(pin_count, pin_count_rhs));
3827 Node* test_pin_count_over_underflow = _gvn.transform(new BoolNode(pin_count_cmp, BoolTest::eq));
3828 IfNode* iff_pin_count_over_underflow = create_and_map_if(control(), test_pin_count_over_underflow, PROB_MIN, COUNT_UNKNOWN);
3829
3830 // True branch, pin count over/underflow.
3831 Node* pin_count_over_underflow = _gvn.transform(new IfTrueNode(iff_pin_count_over_underflow));
3832 {
3833 // Trap (but not deoptimize (Action_none)) and continue in the interpreter
3834 // which will throw IllegalStateException for pin count over/underflow.
3835 // No memory changed so far - we can use memory create by reset_memory()
3836 // at the beginning of this intrinsic. No need to call reset_memory() again.
3837 PreserveJVMState pjvms(this);
3838 set_control(pin_count_over_underflow);
3839 uncommon_trap(Deoptimization::Reason_intrinsic,
3840 Deoptimization::Action_none);
3841 assert(stopped(), "invariant");
3842 }
3843
3844 // False branch, no pin count over/underflow. Increment or decrement pin count and store back.
3845 Node* valid_pin_count = _gvn.transform(new IfFalseNode(iff_pin_count_over_underflow));
3846 set_control(valid_pin_count);
3847
3848 Node* next_pin_count;
3849 if (unpin) {
3850 next_pin_count = _gvn.transform(new SubINode(pin_count, _gvn.intcon(1)));
3851 } else {
3852 next_pin_count = _gvn.transform(new AddINode(pin_count, _gvn.intcon(1)));
3853 }
3854
3855 store_to_memory(control(), pin_count_offset, next_pin_count, T_INT, MemNode::unordered);
3856
3857 // Result of top level CFG and Memory.
3858 RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
3859 record_for_igvn(result_rgn);
3860 PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
3861 record_for_igvn(result_mem);
3862
3863 result_rgn->init_req(_true_path, valid_pin_count);
3864 result_rgn->init_req(_false_path, continuation_is_null);
3865 result_mem->init_req(_true_path, reset_memory());
3866 result_mem->init_req(_false_path, input_memory_state);
3867
3868 // Set output state.
3869 set_control(_gvn.transform(result_rgn));
3870 set_all_memory(_gvn.transform(result_mem));
3871
3872 return true;
3873 }
3874
3875 //---------------------------load_mirror_from_klass----------------------------
3876 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3877 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3878 Node* p = off_heap_plus_addr(klass, in_bytes(Klass::java_mirror_offset()));
3879 Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3880 // mirror = ((OopHandle)mirror)->resolve();
3881 return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3882 }
3883
3884 //-----------------------load_klass_from_mirror_common-------------------------
3885 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3886 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3887 // and branch to the given path on the region.
3888 // If never_see_null, take an uncommon trap on null, so we can optimistically
3889 // compile for the non-null case.
3890 // If the region is null, force never_see_null = true.
3891 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3892 bool never_see_null,
3893 RegionNode* region,
3894 int null_path,
3895 int offset) {
3896 if (region == nullptr) never_see_null = true;
3897 Node* p = basic_plus_adr(mirror, offset);
3898 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3899 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3900 Node* null_ctl = top();
3901 kls = null_check_oop(kls, &null_ctl, never_see_null);
3902 if (region != nullptr) {
3903 // Set region->in(null_path) if the mirror is a primitive (e.g, int.class).
3904 region->init_req(null_path, null_ctl);
3905 } else {
3906 assert(null_ctl == top(), "no loose ends");
3907 }
3908 return kls;
3909 }
3910
3911 //--------------------(inline_native_Class_query helpers)---------------------
3912 // Use this for JVM_ACC_INTERFACE.
3913 // Fall through if (mods & mask) == bits, take the guard otherwise.
3914 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
3915 ByteSize offset, const Type* type, BasicType bt) {
3916 // Branch around if the given klass has the given modifier bit set.
3917 // Like generate_guard, adds a new path onto the region.
3918 Node* modp = off_heap_plus_addr(kls, in_bytes(offset));
3919 Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
3920 Node* mask = intcon(modifier_mask);
3921 Node* bits = intcon(modifier_bits);
3922 Node* mbit = _gvn.transform(new AndINode(mods, mask));
3923 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
3924 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3925 return generate_fair_guard(bol, region);
3926 }
3927 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3928 return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
3929 InstanceKlass::access_flags_offset(), TypeInt::CHAR, T_CHAR);
3930 }
3931
3932 // Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
3933 Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3934 return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
3935 Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
3936 }
3937
3938 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3939 return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
3940 }
3941
3942 //-------------------------inline_native_Class_query-------------------
3943 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3944 const Type* return_type = TypeInt::BOOL;
3945 Node* prim_return_value = top(); // what happens if it's a primitive class?
3946 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3947 bool expect_prim = false; // most of these guys expect to work on refs
3948
3949 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3950
3951 Node* mirror = argument(0);
3952 Node* obj = top();
3953
3954 switch (id) {
3955 case vmIntrinsics::_isInstance:
3956 // nothing is an instance of a primitive type
3957 prim_return_value = intcon(0);
3958 obj = argument(1);
3959 break;
3960 case vmIntrinsics::_isHidden:
3961 prim_return_value = intcon(0);
3962 break;
3963 case vmIntrinsics::_getSuperclass:
3964 prim_return_value = null();
3965 return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
3966 break;
3967 default:
3968 fatal_unexpected_iid(id);
3969 break;
3970 }
3971
3972 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3973 if (mirror_con == nullptr) return false; // cannot happen?
3974
3975 #ifndef PRODUCT
3976 if (C->print_intrinsics() || C->print_inlining()) {
3977 ciType* k = mirror_con->java_mirror_type();
3978 if (k) {
3979 tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
3980 k->print_name();
3981 tty->cr();
3982 }
3983 }
3984 #endif
3985
3986 // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive).
3987 RegionNode* region = new RegionNode(PATH_LIMIT);
3988 record_for_igvn(region);
3989 PhiNode* phi = new PhiNode(region, return_type);
3990
3991 // The mirror will never be null of Reflection.getClassAccessFlags, however
3992 // it may be null for Class.isInstance or Class.getModifiers. Throw a NPE
3993 // if it is. See bug 4774291.
3994
3995 // For Reflection.getClassAccessFlags(), the null check occurs in
3996 // the wrong place; see inline_unsafe_access(), above, for a similar
3997 // situation.
3998 mirror = null_check(mirror);
3999 // If mirror or obj is dead, only null-path is taken.
4000 if (stopped()) return true;
4001
4002 if (expect_prim) never_see_null = false; // expect nulls (meaning prims)
4003
4004 // Now load the mirror's klass metaobject, and null-check it.
4005 // Side-effects region with the control path if the klass is null.
4006 Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path);
4007 // If kls is null, we have a primitive mirror.
4008 phi->init_req(_prim_path, prim_return_value);
4009 if (stopped()) { set_result(region, phi); return true; }
4010 bool safe_for_replace = (region->in(_prim_path) == top());
4011
4012 Node* p; // handy temp
4013 Node* null_ctl;
4014
4015 // Now that we have the non-null klass, we can perform the real query.
4016 // For constant classes, the query will constant-fold in LoadNode::Value.
4017 Node* query_value = top();
4018 switch (id) {
4019 case vmIntrinsics::_isInstance:
4020 // nothing is an instance of a primitive type
4021 query_value = gen_instanceof(obj, kls, safe_for_replace);
4022 break;
4023
4024 case vmIntrinsics::_isHidden:
4025 // (To verify this code sequence, check the asserts in JVM_IsHiddenClass.)
4026 if (generate_hidden_class_guard(kls, region) != nullptr)
4027 // A guard was added. If the guard is taken, it was an hidden class.
4028 phi->add_req(intcon(1));
4029 // If we fall through, it's a plain class.
4030 query_value = intcon(0);
4031 break;
4032
4033
4034 case vmIntrinsics::_getSuperclass:
4035 // The rules here are somewhat unfortunate, but we can still do better
4036 // with random logic than with a JNI call.
4037 // Interfaces store null or Object as _super, but must report null.
4038 // Arrays store an intermediate super as _super, but must report Object.
4039 // Other types can report the actual _super.
4040 // (To verify this code sequence, check the asserts in JVM_IsInterface.)
4041 if (generate_array_guard(kls, region) != nullptr) {
4042 // A guard was added. If the guard is taken, it was an array.
4043 phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
4044 }
4045 // Check for interface after array since this checks AccessFlags offset into InstanceKlass.
4046 // In other words, we are accessing subtype-specific information, so we need to determine the subtype first.
4047 if (generate_interface_guard(kls, region) != nullptr) {
4048 // A guard was added. If the guard is taken, it was an interface.
4049 phi->add_req(null());
4050 }
4051 // If we fall through, it's a plain class. Get its _super.
4052 p = off_heap_plus_addr(kls, in_bytes(Klass::super_offset()));
4053 kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
4054 null_ctl = top();
4055 kls = null_check_oop(kls, &null_ctl);
4056 if (null_ctl != top()) {
4057 // If the guard is taken, Object.superClass is null (both klass and mirror).
4058 region->add_req(null_ctl);
4059 phi ->add_req(null());
4060 }
4061 if (!stopped()) {
4062 query_value = load_mirror_from_klass(kls);
4063 }
4064 break;
4065
4066 default:
4067 fatal_unexpected_iid(id);
4068 break;
4069 }
4070
4071 // Fall-through is the normal case of a query to a real class.
4072 phi->init_req(1, query_value);
4073 region->init_req(1, control());
4074
4075 C->set_has_split_ifs(true); // Has chance for split-if optimization
4076 set_result(region, phi);
4077 return true;
4078 }
4079
4080 //-------------------------inline_Class_cast-------------------
4081 bool LibraryCallKit::inline_Class_cast() {
4082 Node* mirror = argument(0); // Class
4083 Node* obj = argument(1);
4084 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4085 if (mirror_con == nullptr) {
4086 return false; // dead path (mirror->is_top()).
4087 }
4088 if (obj == nullptr || obj->is_top()) {
4089 return false; // dead path
4090 }
4091 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4092
4093 // First, see if Class.cast() can be folded statically.
4094 // java_mirror_type() returns non-null for compile-time Class constants.
4095 ciType* tm = mirror_con->java_mirror_type();
4096 if (tm != nullptr && tm->is_klass() &&
4097 tp != nullptr) {
4098 if (!tp->is_loaded()) {
4099 // Don't use intrinsic when class is not loaded.
4100 return false;
4101 } else {
4102 int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());
4103 if (static_res == Compile::SSC_always_true) {
4104 // isInstance() is true - fold the code.
4105 set_result(obj);
4106 return true;
4107 } else if (static_res == Compile::SSC_always_false) {
4108 // Don't use intrinsic, have to throw ClassCastException.
4109 // If the reference is null, the non-intrinsic bytecode will
4110 // be optimized appropriately.
4111 return false;
4112 }
4113 }
4114 }
4115
4116 // Bailout intrinsic and do normal inlining if exception path is frequent.
4117 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4118 return false;
4119 }
4120
4121 // Generate dynamic checks.
4122 // Class.cast() is java implementation of _checkcast bytecode.
4123 // Do checkcast (Parse::do_checkcast()) optimizations here.
4124
4125 mirror = null_check(mirror);
4126 // If mirror is dead, only null-path is taken.
4127 if (stopped()) {
4128 return true;
4129 }
4130
4131 // Not-subtype or the mirror's klass ptr is null (in case it is a primitive).
4132 enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
4133 RegionNode* region = new RegionNode(PATH_LIMIT);
4134 record_for_igvn(region);
4135
4136 // Now load the mirror's klass metaobject, and null-check it.
4137 // If kls is null, we have a primitive mirror and
4138 // nothing is an instance of a primitive type.
4139 Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4140
4141 Node* res = top();
4142 if (!stopped()) {
4143 Node* bad_type_ctrl = top();
4144 // Do checkcast optimizations.
4145 res = gen_checkcast(obj, kls, &bad_type_ctrl);
4146 region->init_req(_bad_type_path, bad_type_ctrl);
4147 }
4148 if (region->in(_prim_path) != top() ||
4149 region->in(_bad_type_path) != top()) {
4150 // Let Interpreter throw ClassCastException.
4151 PreserveJVMState pjvms(this);
4152 set_control(_gvn.transform(region));
4153 uncommon_trap(Deoptimization::Reason_intrinsic,
4154 Deoptimization::Action_maybe_recompile);
4155 }
4156 if (!stopped()) {
4157 set_result(res);
4158 }
4159 return true;
4160 }
4161
4162
4163 //--------------------------inline_native_subtype_check------------------------
4164 // This intrinsic takes the JNI calls out of the heart of
4165 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4166 bool LibraryCallKit::inline_native_subtype_check() {
4167 // Pull both arguments off the stack.
4168 Node* args[2]; // two java.lang.Class mirrors: superc, subc
4169 args[0] = argument(0);
4170 args[1] = argument(1);
4171 Node* klasses[2]; // corresponding Klasses: superk, subk
4172 klasses[0] = klasses[1] = top();
4173
4174 enum {
4175 // A full decision tree on {superc is prim, subc is prim}:
4176 _prim_0_path = 1, // {P,N} => false
4177 // {P,P} & superc!=subc => false
4178 _prim_same_path, // {P,P} & superc==subc => true
4179 _prim_1_path, // {N,P} => false
4180 _ref_subtype_path, // {N,N} & subtype check wins => true
4181 _both_ref_path, // {N,N} & subtype check loses => false
4182 PATH_LIMIT
4183 };
4184
4185 RegionNode* region = new RegionNode(PATH_LIMIT);
4186 Node* phi = new PhiNode(region, TypeInt::BOOL);
4187 record_for_igvn(region);
4188
4189 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
4190 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4191 int class_klass_offset = java_lang_Class::klass_offset();
4192
4193 // First null-check both mirrors and load each mirror's klass metaobject.
4194 int which_arg;
4195 for (which_arg = 0; which_arg <= 1; which_arg++) {
4196 Node* arg = args[which_arg];
4197 arg = null_check(arg);
4198 if (stopped()) break;
4199 args[which_arg] = arg;
4200
4201 Node* p = basic_plus_adr(arg, class_klass_offset);
4202 Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
4203 klasses[which_arg] = _gvn.transform(kls);
4204 }
4205
4206 // Having loaded both klasses, test each for null.
4207 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4208 for (which_arg = 0; which_arg <= 1; which_arg++) {
4209 Node* kls = klasses[which_arg];
4210 Node* null_ctl = top();
4211 kls = null_check_oop(kls, &null_ctl, never_see_null);
4212 int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
4213 region->init_req(prim_path, null_ctl);
4214 if (stopped()) break;
4215 klasses[which_arg] = kls;
4216 }
4217
4218 if (!stopped()) {
4219 // now we have two reference types, in klasses[0..1]
4220 Node* subk = klasses[1]; // the argument to isAssignableFrom
4221 Node* superk = klasses[0]; // the receiver
4222 region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4223 // now we have a successful reference subtype check
4224 region->set_req(_ref_subtype_path, control());
4225 }
4226
4227 // If both operands are primitive (both klasses null), then
4228 // we must return true when they are identical primitives.
4229 // It is convenient to test this after the first null klass check.
4230 set_control(region->in(_prim_0_path)); // go back to first null check
4231 if (!stopped()) {
4232 // Since superc is primitive, make a guard for the superc==subc case.
4233 Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4234 Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4235 generate_guard(bol_eq, region, PROB_FAIR);
4236 if (region->req() == PATH_LIMIT+1) {
4237 // A guard was added. If the added guard is taken, superc==subc.
4238 region->swap_edges(PATH_LIMIT, _prim_same_path);
4239 region->del_req(PATH_LIMIT);
4240 }
4241 region->set_req(_prim_0_path, control()); // Not equal after all.
4242 }
4243
4244 // these are the only paths that produce 'true':
4245 phi->set_req(_prim_same_path, intcon(1));
4246 phi->set_req(_ref_subtype_path, intcon(1));
4247
4248 // pull together the cases:
4249 assert(region->req() == PATH_LIMIT, "sane region");
4250 for (uint i = 1; i < region->req(); i++) {
4251 Node* ctl = region->in(i);
4252 if (ctl == nullptr || ctl == top()) {
4253 region->set_req(i, top());
4254 phi ->set_req(i, top());
4255 } else if (phi->in(i) == nullptr) {
4256 phi->set_req(i, intcon(0)); // all other paths produce 'false'
4257 }
4258 }
4259
4260 set_control(_gvn.transform(region));
4261 set_result(_gvn.transform(phi));
4262 return true;
4263 }
4264
4265 //---------------------generate_array_guard_common------------------------
4266 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
4267 bool obj_array, bool not_array, Node** obj) {
4268
4269 if (stopped()) {
4270 return nullptr;
4271 }
4272
4273 // If obj_array/non_array==false/false:
4274 // Branch around if the given klass is in fact an array (either obj or prim).
4275 // If obj_array/non_array==false/true:
4276 // Branch around if the given klass is not an array klass of any kind.
4277 // If obj_array/non_array==true/true:
4278 // Branch around if the kls is not an oop array (kls is int[], String, etc.)
4279 // If obj_array/non_array==true/false:
4280 // Branch around if the kls is an oop array (Object[] or subtype)
4281 //
4282 // Like generate_guard, adds a new path onto the region.
4283 jint layout_con = 0;
4284 Node* layout_val = get_layout_helper(kls, layout_con);
4285 if (layout_val == nullptr) {
4286 bool query = (obj_array
4287 ? Klass::layout_helper_is_objArray(layout_con)
4288 : Klass::layout_helper_is_array(layout_con));
4289 if (query == not_array) {
4290 return nullptr; // never a branch
4291 } else { // always a branch
4292 Node* always_branch = control();
4293 if (region != nullptr)
4294 region->add_req(always_branch);
4295 set_control(top());
4296 return always_branch;
4297 }
4298 }
4299 // Now test the correct condition.
4300 jint nval = (obj_array
4301 ? (jint)(Klass::_lh_array_tag_type_value
4302 << Klass::_lh_array_tag_shift)
4303 : Klass::_lh_neutral_value);
4304 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4305 BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array
4306 // invert the test if we are looking for a non-array
4307 if (not_array) btest = BoolTest(btest).negate();
4308 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4309 Node* ctrl = generate_fair_guard(bol, region);
4310 Node* is_array_ctrl = not_array ? control() : ctrl;
4311 if (obj != nullptr && is_array_ctrl != nullptr && is_array_ctrl != top()) {
4312 // Keep track of the fact that 'obj' is an array to prevent
4313 // array specific accesses from floating above the guard.
4314 *obj = _gvn.transform(new CheckCastPPNode(is_array_ctrl, *obj, TypeAryPtr::BOTTOM));
4315 }
4316 return ctrl;
4317 }
4318
4319
4320 //-----------------------inline_native_newArray--------------------------
4321 // private static native Object java.lang.reflect.newArray(Class<?> componentType, int length);
4322 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4323 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4324 Node* mirror;
4325 Node* count_val;
4326 if (uninitialized) {
4327 null_check_receiver();
4328 mirror = argument(1);
4329 count_val = argument(2);
4330 } else {
4331 mirror = argument(0);
4332 count_val = argument(1);
4333 }
4334
4335 mirror = null_check(mirror);
4336 // If mirror or obj is dead, only null-path is taken.
4337 if (stopped()) return true;
4338
4339 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4340 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4341 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4342 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4343 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4344
4345 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4346 Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
4347 result_reg, _slow_path);
4348 Node* normal_ctl = control();
4349 Node* no_array_ctl = result_reg->in(_slow_path);
4350
4351 // Generate code for the slow case. We make a call to newArray().
4352 set_control(no_array_ctl);
4353 if (!stopped()) {
4354 // Either the input type is void.class, or else the
4355 // array klass has not yet been cached. Either the
4356 // ensuing call will throw an exception, or else it
4357 // will cache the array klass for next time.
4358 PreserveJVMState pjvms(this);
4359 CallJavaNode* slow_call = nullptr;
4360 if (uninitialized) {
4361 // Generate optimized virtual call (holder class 'Unsafe' is final)
4362 slow_call = generate_method_call(vmIntrinsics::_allocateUninitializedArray, false, false, true);
4363 } else {
4364 slow_call = generate_method_call_static(vmIntrinsics::_newArray, true);
4365 }
4366 Node* slow_result = set_results_for_java_call(slow_call);
4367 // this->control() comes from set_results_for_java_call
4368 result_reg->set_req(_slow_path, control());
4369 result_val->set_req(_slow_path, slow_result);
4370 result_io ->set_req(_slow_path, i_o());
4371 result_mem->set_req(_slow_path, reset_memory());
4372 }
4373
4374 set_control(normal_ctl);
4375 if (!stopped()) {
4376 // Normal case: The array type has been cached in the java.lang.Class.
4377 // The following call works fine even if the array type is polymorphic.
4378 // It could be a dynamic mix of int[], boolean[], Object[], etc.
4379 Node* obj = new_array(klass_node, count_val, 0); // no arguments to push
4380 result_reg->init_req(_normal_path, control());
4381 result_val->init_req(_normal_path, obj);
4382 result_io ->init_req(_normal_path, i_o());
4383 result_mem->init_req(_normal_path, reset_memory());
4384
4385 if (uninitialized) {
4386 // Mark the allocation so that zeroing is skipped
4387 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(obj);
4388 alloc->maybe_set_complete(&_gvn);
4389 }
4390 }
4391
4392 // Return the combined state.
4393 set_i_o( _gvn.transform(result_io) );
4394 set_all_memory( _gvn.transform(result_mem));
4395
4396 C->set_has_split_ifs(true); // Has chance for split-if optimization
4397 set_result(result_reg, result_val);
4398 return true;
4399 }
4400
4401 //----------------------inline_native_getLength--------------------------
4402 // public static native int java.lang.reflect.Array.getLength(Object array);
4403 bool LibraryCallKit::inline_native_getLength() {
4404 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
4405
4406 Node* array = null_check(argument(0));
4407 // If array is dead, only null-path is taken.
4408 if (stopped()) return true;
4409
4410 // Deoptimize if it is a non-array.
4411 Node* non_array = generate_non_array_guard(load_object_klass(array), nullptr, &array);
4412
4413 if (non_array != nullptr) {
4414 PreserveJVMState pjvms(this);
4415 set_control(non_array);
4416 uncommon_trap(Deoptimization::Reason_intrinsic,
4417 Deoptimization::Action_maybe_recompile);
4418 }
4419
4420 // If control is dead, only non-array-path is taken.
4421 if (stopped()) return true;
4422
4423 // The works fine even if the array type is polymorphic.
4424 // It could be a dynamic mix of int[], boolean[], Object[], etc.
4425 Node* result = load_array_length(array);
4426
4427 C->set_has_split_ifs(true); // Has chance for split-if optimization
4428 set_result(result);
4429 return true;
4430 }
4431
4432 //------------------------inline_array_copyOf----------------------------
4433 // public static <T,U> T[] java.util.Arrays.copyOf( U[] original, int newLength, Class<? extends T[]> newType);
4434 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from, int to, Class<? extends T[]> newType);
4435 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
4436 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
4437
4438 // Get the arguments.
4439 Node* original = argument(0);
4440 Node* start = is_copyOfRange? argument(1): intcon(0);
4441 Node* end = is_copyOfRange? argument(2): argument(1);
4442 Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
4443
4444 Node* newcopy = nullptr;
4445
4446 // Set the original stack and the reexecute bit for the interpreter to reexecute
4447 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4448 { PreserveReexecuteState preexecs(this);
4449 jvms()->set_should_reexecute(true);
4450
4451 array_type_mirror = null_check(array_type_mirror);
4452 original = null_check(original);
4453
4454 // Check if a null path was taken unconditionally.
4455 if (stopped()) return true;
4456
4457 Node* orig_length = load_array_length(original);
4458
4459 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4460 klass_node = null_check(klass_node);
4461
4462 RegionNode* bailout = new RegionNode(1);
4463 record_for_igvn(bailout);
4464
4465 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4466 // Bail out if that is so.
4467 Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
4468 if (not_objArray != nullptr) {
4469 // Improve the klass node's type from the new optimistic assumption:
4470 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4471 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
4472 Node* cast = new CastPPNode(control(), klass_node, akls);
4473 klass_node = _gvn.transform(cast);
4474 }
4475
4476 // Bail out if either start or end is negative.
4477 generate_negative_guard(start, bailout, &start);
4478 generate_negative_guard(end, bailout, &end);
4479
4480 Node* length = end;
4481 if (_gvn.type(start) != TypeInt::ZERO) {
4482 length = _gvn.transform(new SubINode(end, start));
4483 }
4484
4485 // Bail out if length is negative (i.e., if start > end).
4486 // Without this the new_array would throw
4487 // NegativeArraySizeException but IllegalArgumentException is what
4488 // should be thrown
4489 generate_negative_guard(length, bailout, &length);
4490
4491 // Bail out if start is larger than the original length
4492 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4493 generate_negative_guard(orig_tail, bailout, &orig_tail);
4494
4495 if (bailout->req() > 1) {
4496 PreserveJVMState pjvms(this);
4497 set_control(_gvn.transform(bailout));
4498 uncommon_trap(Deoptimization::Reason_intrinsic,
4499 Deoptimization::Action_maybe_recompile);
4500 }
4501
4502 if (!stopped()) {
4503 // How many elements will we copy from the original?
4504 // The answer is MinI(orig_tail, length).
4505 Node* moved = _gvn.transform(new MinINode(orig_tail, length));
4506
4507 // Generate a direct call to the right arraycopy function(s).
4508 // We know the copy is disjoint but we might not know if the
4509 // oop stores need checking.
4510 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
4511 // This will fail a store-check if x contains any non-nulls.
4512
4513 // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
4514 // loads/stores but it is legal only if we're sure the
4515 // Arrays.copyOf would succeed. So we need all input arguments
4516 // to the copyOf to be validated, including that the copy to the
4517 // new array won't trigger an ArrayStoreException. That subtype
4518 // check can be optimized if we know something on the type of
4519 // the input array from type speculation.
4520 if (_gvn.type(klass_node)->singleton()) {
4521 const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4522 const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4523
4524 int test = C->static_subtype_check(superk, subk);
4525 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4526 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4527 if (t_original->speculative_type() != nullptr) {
4528 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4529 }
4530 }
4531 }
4532
4533 bool validated = false;
4534 // Reason_class_check rather than Reason_intrinsic because we
4535 // want to intrinsify even if this traps.
4536 if (!too_many_traps(Deoptimization::Reason_class_check)) {
4537 Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4538
4539 if (not_subtype_ctrl != top()) {
4540 PreserveJVMState pjvms(this);
4541 set_control(not_subtype_ctrl);
4542 uncommon_trap(Deoptimization::Reason_class_check,
4543 Deoptimization::Action_make_not_entrant);
4544 assert(stopped(), "Should be stopped");
4545 }
4546 validated = true;
4547 }
4548
4549 if (!stopped()) {
4550 newcopy = new_array(klass_node, length, 0); // no arguments to push
4551
4552 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
4553 load_object_klass(original), klass_node);
4554 if (!is_copyOfRange) {
4555 ac->set_copyof(validated);
4556 } else {
4557 ac->set_copyofrange(validated);
4558 }
4559 Node* n = _gvn.transform(ac);
4560 if (n == ac) {
4561 ac->connect_outputs(this);
4562 } else {
4563 assert(validated, "shouldn't transform if all arguments not validated");
4564 set_all_memory(n);
4565 }
4566 }
4567 }
4568 } // original reexecute is set back here
4569
4570 C->set_has_split_ifs(true); // Has chance for split-if optimization
4571 if (!stopped()) {
4572 set_result(newcopy);
4573 }
4574 return true;
4575 }
4576
4577
4578 //----------------------generate_virtual_guard---------------------------
4579 // Helper for hashCode and clone. Peeks inside the vtable to avoid a call.
4580 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
4581 RegionNode* slow_region) {
4582 ciMethod* method = callee();
4583 int vtable_index = method->vtable_index();
4584 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4585 "bad index %d", vtable_index);
4586 // Get the Method* out of the appropriate vtable entry.
4587 int entry_offset = in_bytes(Klass::vtable_start_offset()) +
4588 vtable_index*vtableEntry::size_in_bytes() +
4589 in_bytes(vtableEntry::method_offset());
4590 Node* entry_addr = off_heap_plus_addr(obj_klass, entry_offset);
4591 Node* target_call = make_load(nullptr, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
4592
4593 // Compare the target method with the expected method (e.g., Object.hashCode).
4594 const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
4595
4596 Node* native_call = makecon(native_call_addr);
4597 Node* chk_native = _gvn.transform(new CmpPNode(target_call, native_call));
4598 Node* test_native = _gvn.transform(new BoolNode(chk_native, BoolTest::ne));
4599
4600 return generate_slow_guard(test_native, slow_region);
4601 }
4602
4603 //-----------------------generate_method_call----------------------------
4604 // Use generate_method_call to make a slow-call to the real
4605 // method if the fast path fails. An alternative would be to
4606 // use a stub like OptoRuntime::slow_arraycopy_Java.
4607 // This only works for expanding the current library call,
4608 // not another intrinsic. (E.g., don't use this for making an
4609 // arraycopy call inside of the copyOf intrinsic.)
4610 CallJavaNode*
4611 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4612 // When compiling the intrinsic method itself, do not use this technique.
4613 guarantee(callee() != C->method(), "cannot make slow-call to self");
4614
4615 ciMethod* method = callee();
4616 // ensure the JVMS we have will be correct for this call
4617 guarantee(method_id == method->intrinsic_id(), "must match");
4618
4619 const TypeFunc* tf = TypeFunc::make(method);
4620 if (res_not_null) {
4621 assert(tf->return_type() == T_OBJECT, "");
4622 const TypeTuple* range = tf->range();
4623 const Type** fields = TypeTuple::fields(range->cnt());
4624 fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4625 const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4626 tf = TypeFunc::make(tf->domain(), new_range);
4627 }
4628 CallJavaNode* slow_call;
4629 if (is_static) {
4630 assert(!is_virtual, "");
4631 slow_call = new CallStaticJavaNode(C, tf,
4632 SharedRuntime::get_resolve_static_call_stub(), method);
4633 } else if (is_virtual) {
4634 assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4635 int vtable_index = Method::invalid_vtable_index;
4636 if (UseInlineCaches) {
4637 // Suppress the vtable call
4638 } else {
4639 // hashCode and clone are not a miranda methods,
4640 // so the vtable index is fixed.
4641 // No need to use the linkResolver to get it.
4642 vtable_index = method->vtable_index();
4643 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4644 "bad index %d", vtable_index);
4645 }
4646 slow_call = new CallDynamicJavaNode(tf,
4647 SharedRuntime::get_resolve_virtual_call_stub(),
4648 method, vtable_index);
4649 } else { // neither virtual nor static: opt_virtual
4650 assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4651 slow_call = new CallStaticJavaNode(C, tf,
4652 SharedRuntime::get_resolve_opt_virtual_call_stub(), method);
4653 slow_call->set_optimized_virtual(true);
4654 }
4655 if (CallGenerator::is_inlined_method_handle_intrinsic(this->method(), bci(), callee())) {
4656 // To be able to issue a direct call (optimized virtual or virtual)
4657 // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
4658 // about the method being invoked should be attached to the call site to
4659 // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
4660 slow_call->set_override_symbolic_info(true);
4661 }
4662 set_arguments_for_java_call(slow_call);
4663 set_edges_for_java_call(slow_call);
4664 return slow_call;
4665 }
4666
4667
4668 /**
4669 * Build special case code for calls to hashCode on an object. This call may
4670 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4671 * slightly different code.
4672 */
4673 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4674 assert(is_static == callee()->is_static(), "correct intrinsic selection");
4675 assert(!(is_virtual && is_static), "either virtual, special, or static");
4676
4677 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4678
4679 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4680 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
4681 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4682 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4683 Node* obj = nullptr;
4684 if (!is_static) {
4685 // Check for hashing null object
4686 obj = null_check_receiver();
4687 if (stopped()) return true; // unconditionally null
4688 result_reg->init_req(_null_path, top());
4689 result_val->init_req(_null_path, top());
4690 } else {
4691 // Do a null check, and return zero if null.
4692 // System.identityHashCode(null) == 0
4693 obj = argument(0);
4694 Node* null_ctl = top();
4695 obj = null_check_oop(obj, &null_ctl);
4696 result_reg->init_req(_null_path, null_ctl);
4697 result_val->init_req(_null_path, _gvn.intcon(0));
4698 }
4699
4700 // Unconditionally null? Then return right away.
4701 if (stopped()) {
4702 set_control( result_reg->in(_null_path));
4703 if (!stopped())
4704 set_result(result_val->in(_null_path));
4705 return true;
4706 }
4707
4708 // We only go to the fast case code if we pass a number of guards. The
4709 // paths which do not pass are accumulated in the slow_region.
4710 RegionNode* slow_region = new RegionNode(1);
4711 record_for_igvn(slow_region);
4712
4713 // If this is a virtual call, we generate a funny guard. We pull out
4714 // the vtable entry corresponding to hashCode() from the target object.
4715 // If the target method which we are calling happens to be the native
4716 // Object hashCode() method, we pass the guard. We do not need this
4717 // guard for non-virtual calls -- the caller is known to be the native
4718 // Object hashCode().
4719 if (is_virtual) {
4720 // After null check, get the object's klass.
4721 Node* obj_klass = load_object_klass(obj);
4722 generate_virtual_guard(obj_klass, slow_region);
4723 }
4724
4725 // Get the header out of the object, use LoadMarkNode when available
4726 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4727 // The control of the load must be null. Otherwise, the load can move before
4728 // the null check after castPP removal.
4729 Node* no_ctrl = nullptr;
4730 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4731
4732 if (!UseObjectMonitorTable) {
4733 // Test the header to see if it is safe to read w.r.t. locking.
4734 Node *lock_mask = _gvn.MakeConX(markWord::lock_mask_in_place);
4735 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4736 Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
4737 Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4738 Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4739
4740 generate_slow_guard(test_monitor, slow_region);
4741 }
4742
4743 // Get the hash value and check to see that it has been properly assigned.
4744 // We depend on hash_mask being at most 32 bits and avoid the use of
4745 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4746 // vm: see markWord.hpp.
4747 Node *hash_mask = _gvn.intcon(markWord::hash_mask);
4748 Node *hash_shift = _gvn.intcon(markWord::hash_shift);
4749 Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4750 // This hack lets the hash bits live anywhere in the mark object now, as long
4751 // as the shift drops the relevant bits into the low 32 bits. Note that
4752 // Java spec says that HashCode is an int so there's no point in capturing
4753 // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4754 hshifted_header = ConvX2I(hshifted_header);
4755 Node *hash_val = _gvn.transform(new AndINode(hshifted_header, hash_mask));
4756
4757 Node *no_hash_val = _gvn.intcon(markWord::no_hash);
4758 Node *chk_assigned = _gvn.transform(new CmpINode( hash_val, no_hash_val));
4759 Node *test_assigned = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
4760
4761 generate_slow_guard(test_assigned, slow_region);
4762
4763 Node* init_mem = reset_memory();
4764 // fill in the rest of the null path:
4765 result_io ->init_req(_null_path, i_o());
4766 result_mem->init_req(_null_path, init_mem);
4767
4768 result_val->init_req(_fast_path, hash_val);
4769 result_reg->init_req(_fast_path, control());
4770 result_io ->init_req(_fast_path, i_o());
4771 result_mem->init_req(_fast_path, init_mem);
4772
4773 // Generate code for the slow case. We make a call to hashCode().
4774 set_control(_gvn.transform(slow_region));
4775 if (!stopped()) {
4776 // No need for PreserveJVMState, because we're using up the present state.
4777 set_all_memory(init_mem);
4778 vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
4779 CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static, false);
4780 Node* slow_result = set_results_for_java_call(slow_call);
4781 // this->control() comes from set_results_for_java_call
4782 result_reg->init_req(_slow_path, control());
4783 result_val->init_req(_slow_path, slow_result);
4784 result_io ->set_req(_slow_path, i_o());
4785 result_mem ->set_req(_slow_path, reset_memory());
4786 }
4787
4788 // Return the combined state.
4789 set_i_o( _gvn.transform(result_io) );
4790 set_all_memory( _gvn.transform(result_mem));
4791
4792 set_result(result_reg, result_val);
4793 return true;
4794 }
4795
4796 //---------------------------inline_native_getClass----------------------------
4797 // public final native Class<?> java.lang.Object.getClass();
4798 //
4799 // Build special case code for calls to getClass on an object.
4800 bool LibraryCallKit::inline_native_getClass() {
4801 Node* obj = null_check_receiver();
4802 if (stopped()) return true;
4803 set_result(load_mirror_from_klass(load_object_klass(obj)));
4804 return true;
4805 }
4806
4807 //-----------------inline_native_Reflection_getCallerClass---------------------
4808 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4809 //
4810 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4811 //
4812 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4813 // in that it must skip particular security frames and checks for
4814 // caller sensitive methods.
4815 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4816 #ifndef PRODUCT
4817 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4818 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4819 }
4820 #endif
4821
4822 if (!jvms()->has_method()) {
4823 #ifndef PRODUCT
4824 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4825 tty->print_cr(" Bailing out because intrinsic was inlined at top level");
4826 }
4827 #endif
4828 return false;
4829 }
4830
4831 // Walk back up the JVM state to find the caller at the required
4832 // depth.
4833 JVMState* caller_jvms = jvms();
4834
4835 // Cf. JVM_GetCallerClass
4836 // NOTE: Start the loop at depth 1 because the current JVM state does
4837 // not include the Reflection.getCallerClass() frame.
4838 for (int n = 1; caller_jvms != nullptr; caller_jvms = caller_jvms->caller(), n++) {
4839 ciMethod* m = caller_jvms->method();
4840 switch (n) {
4841 case 0:
4842 fatal("current JVM state does not include the Reflection.getCallerClass frame");
4843 break;
4844 case 1:
4845 // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
4846 if (!m->caller_sensitive()) {
4847 #ifndef PRODUCT
4848 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4849 tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n);
4850 }
4851 #endif
4852 return false; // bail-out; let JVM_GetCallerClass do the work
4853 }
4854 break;
4855 default:
4856 if (!m->is_ignored_by_security_stack_walk()) {
4857 // We have reached the desired frame; return the holder class.
4858 // Acquire method holder as java.lang.Class and push as constant.
4859 ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
4860 ciInstance* caller_mirror = caller_klass->java_mirror();
4861 set_result(makecon(TypeInstPtr::make(caller_mirror)));
4862
4863 #ifndef PRODUCT
4864 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4865 tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
4866 tty->print_cr(" JVM state at this point:");
4867 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4868 ciMethod* m = jvms()->of_depth(i)->method();
4869 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4870 }
4871 }
4872 #endif
4873 return true;
4874 }
4875 break;
4876 }
4877 }
4878
4879 #ifndef PRODUCT
4880 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4881 tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
4882 tty->print_cr(" JVM state at this point:");
4883 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4884 ciMethod* m = jvms()->of_depth(i)->method();
4885 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4886 }
4887 }
4888 #endif
4889
4890 return false; // bail-out; let JVM_GetCallerClass do the work
4891 }
4892
4893 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
4894 Node* arg = argument(0);
4895 Node* result = nullptr;
4896
4897 switch (id) {
4898 case vmIntrinsics::_floatToRawIntBits: result = new MoveF2INode(arg); break;
4899 case vmIntrinsics::_intBitsToFloat: result = new MoveI2FNode(arg); break;
4900 case vmIntrinsics::_doubleToRawLongBits: result = new MoveD2LNode(arg); break;
4901 case vmIntrinsics::_longBitsToDouble: result = new MoveL2DNode(arg); break;
4902 case vmIntrinsics::_floatToFloat16: result = new ConvF2HFNode(arg); break;
4903 case vmIntrinsics::_float16ToFloat: result = new ConvHF2FNode(arg); break;
4904
4905 case vmIntrinsics::_doubleToLongBits: {
4906 // two paths (plus control) merge in a wood
4907 RegionNode *r = new RegionNode(3);
4908 Node *phi = new PhiNode(r, TypeLong::LONG);
4909
4910 Node *cmpisnan = _gvn.transform(new CmpDNode(arg, arg));
4911 // Build the boolean node
4912 Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
4913
4914 // Branch either way.
4915 // NaN case is less traveled, which makes all the difference.
4916 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4917 Node *opt_isnan = _gvn.transform(ifisnan);
4918 assert( opt_isnan->is_If(), "Expect an IfNode");
4919 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4920 Node *iftrue = _gvn.transform(new IfTrueNode(opt_ifisnan));
4921
4922 set_control(iftrue);
4923
4924 static const jlong nan_bits = CONST64(0x7ff8000000000000);
4925 Node *slow_result = longcon(nan_bits); // return NaN
4926 phi->init_req(1, _gvn.transform( slow_result ));
4927 r->init_req(1, iftrue);
4928
4929 // Else fall through
4930 Node *iffalse = _gvn.transform(new IfFalseNode(opt_ifisnan));
4931 set_control(iffalse);
4932
4933 phi->init_req(2, _gvn.transform(new MoveD2LNode(arg)));
4934 r->init_req(2, iffalse);
4935
4936 // Post merge
4937 set_control(_gvn.transform(r));
4938 record_for_igvn(r);
4939
4940 C->set_has_split_ifs(true); // Has chance for split-if optimization
4941 result = phi;
4942 assert(result->bottom_type()->isa_long(), "must be");
4943 break;
4944 }
4945
4946 case vmIntrinsics::_floatToIntBits: {
4947 // two paths (plus control) merge in a wood
4948 RegionNode *r = new RegionNode(3);
4949 Node *phi = new PhiNode(r, TypeInt::INT);
4950
4951 Node *cmpisnan = _gvn.transform(new CmpFNode(arg, arg));
4952 // Build the boolean node
4953 Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
4954
4955 // Branch either way.
4956 // NaN case is less traveled, which makes all the difference.
4957 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4958 Node *opt_isnan = _gvn.transform(ifisnan);
4959 assert( opt_isnan->is_If(), "Expect an IfNode");
4960 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4961 Node *iftrue = _gvn.transform(new IfTrueNode(opt_ifisnan));
4962
4963 set_control(iftrue);
4964
4965 static const jint nan_bits = 0x7fc00000;
4966 Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN
4967 phi->init_req(1, _gvn.transform( slow_result ));
4968 r->init_req(1, iftrue);
4969
4970 // Else fall through
4971 Node *iffalse = _gvn.transform(new IfFalseNode(opt_ifisnan));
4972 set_control(iffalse);
4973
4974 phi->init_req(2, _gvn.transform(new MoveF2INode(arg)));
4975 r->init_req(2, iffalse);
4976
4977 // Post merge
4978 set_control(_gvn.transform(r));
4979 record_for_igvn(r);
4980
4981 C->set_has_split_ifs(true); // Has chance for split-if optimization
4982 result = phi;
4983 assert(result->bottom_type()->isa_int(), "must be");
4984 break;
4985 }
4986
4987 default:
4988 fatal_unexpected_iid(id);
4989 break;
4990 }
4991 set_result(_gvn.transform(result));
4992 return true;
4993 }
4994
4995 bool LibraryCallKit::inline_fp_range_check(vmIntrinsics::ID id) {
4996 Node* arg = argument(0);
4997 Node* result = nullptr;
4998
4999 switch (id) {
5000 case vmIntrinsics::_floatIsInfinite:
5001 result = new IsInfiniteFNode(arg);
5002 break;
5003 case vmIntrinsics::_floatIsFinite:
5004 result = new IsFiniteFNode(arg);
5005 break;
5006 case vmIntrinsics::_doubleIsInfinite:
5007 result = new IsInfiniteDNode(arg);
5008 break;
5009 case vmIntrinsics::_doubleIsFinite:
5010 result = new IsFiniteDNode(arg);
5011 break;
5012 default:
5013 fatal_unexpected_iid(id);
5014 break;
5015 }
5016 set_result(_gvn.transform(result));
5017 return true;
5018 }
5019
5020 //----------------------inline_unsafe_copyMemory-------------------------
5021 // public native void Unsafe.copyMemory0(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
5022
5023 static bool has_wide_mem(PhaseGVN& gvn, Node* addr, Node* base) {
5024 const TypeAryPtr* addr_t = gvn.type(addr)->isa_aryptr();
5025 const Type* base_t = gvn.type(base);
5026
5027 bool in_native = (base_t == TypePtr::NULL_PTR);
5028 bool in_heap = !TypePtr::NULL_PTR->higher_equal(base_t);
5029 bool is_mixed = !in_heap && !in_native;
5030
5031 if (is_mixed) {
5032 return true; // mixed accesses can touch both on-heap and off-heap memory
5033 }
5034 if (in_heap) {
5035 bool is_prim_array = (addr_t != nullptr) && (addr_t->elem() != Type::BOTTOM);
5036 if (!is_prim_array) {
5037 // Though Unsafe.copyMemory() ensures at runtime for on-heap accesses that base is a primitive array,
5038 // there's not enough type information available to determine proper memory slice for it.
5039 return true;
5040 }
5041 }
5042 return false;
5043 }
5044
5045 bool LibraryCallKit::inline_unsafe_copyMemory() {
5046 if (callee()->is_static()) return false; // caller must have the capability!
5047 null_check_receiver(); // null-check receiver
5048 if (stopped()) return true;
5049
5050 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
5051
5052 Node* src_base = argument(1); // type: oop
5053 Node* src_off = ConvL2X(argument(2)); // type: long
5054 Node* dst_base = argument(4); // type: oop
5055 Node* dst_off = ConvL2X(argument(5)); // type: long
5056 Node* size = ConvL2X(argument(7)); // type: long
5057
5058 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
5059 "fieldOffset must be byte-scaled");
5060
5061 Node* src_addr = make_unsafe_address(src_base, src_off);
5062 Node* dst_addr = make_unsafe_address(dst_base, dst_off);
5063
5064 Node* thread = _gvn.transform(new ThreadLocalNode());
5065 Node* doing_unsafe_access_addr = off_heap_plus_addr(thread, in_bytes(JavaThread::doing_unsafe_access_offset()));
5066 BasicType doing_unsafe_access_bt = T_BYTE;
5067 assert((sizeof(bool) * CHAR_BIT) == 8, "not implemented");
5068
5069 // update volatile field
5070 store_to_memory(control(), doing_unsafe_access_addr, intcon(1), doing_unsafe_access_bt, MemNode::unordered);
5071
5072 int flags = RC_LEAF | RC_NO_FP;
5073
5074 const TypePtr* dst_type = TypePtr::BOTTOM;
5075
5076 // Adjust memory effects of the runtime call based on input values.
5077 if (!has_wide_mem(_gvn, src_addr, src_base) &&
5078 !has_wide_mem(_gvn, dst_addr, dst_base)) {
5079 dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5080
5081 const TypePtr* src_type = _gvn.type(src_addr)->is_ptr();
5082 if (C->get_alias_index(src_type) == C->get_alias_index(dst_type)) {
5083 flags |= RC_NARROW_MEM; // narrow in memory
5084 }
5085 }
5086
5087 // Call it. Note that the length argument is not scaled.
5088 make_runtime_call(flags,
5089 OptoRuntime::fast_arraycopy_Type(),
5090 StubRoutines::unsafe_arraycopy(),
5091 "unsafe_arraycopy",
5092 dst_type,
5093 src_addr, dst_addr, size XTOP);
5094
5095 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, MemNode::unordered);
5096
5097 return true;
5098 }
5099
5100 // unsafe_setmemory(void *base, ulong offset, size_t length, char fill_value);
5101 // Fill 'length' bytes starting from 'base[offset]' with 'fill_value'
5102 bool LibraryCallKit::inline_unsafe_setMemory() {
5103 if (callee()->is_static()) return false; // caller must have the capability!
5104 null_check_receiver(); // null-check receiver
5105 if (stopped()) return true;
5106
5107 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
5108
5109 Node* dst_base = argument(1); // type: oop
5110 Node* dst_off = ConvL2X(argument(2)); // type: long
5111 Node* size = ConvL2X(argument(4)); // type: long
5112 Node* byte = argument(6); // type: byte
5113
5114 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
5115 "fieldOffset must be byte-scaled");
5116
5117 Node* dst_addr = make_unsafe_address(dst_base, dst_off);
5118
5119 Node* thread = _gvn.transform(new ThreadLocalNode());
5120 Node* doing_unsafe_access_addr = off_heap_plus_addr(thread, in_bytes(JavaThread::doing_unsafe_access_offset()));
5121 BasicType doing_unsafe_access_bt = T_BYTE;
5122 assert((sizeof(bool) * CHAR_BIT) == 8, "not implemented");
5123
5124 // update volatile field
5125 store_to_memory(control(), doing_unsafe_access_addr, intcon(1), doing_unsafe_access_bt, MemNode::unordered);
5126
5127 int flags = RC_LEAF | RC_NO_FP;
5128
5129 const TypePtr* dst_type = TypePtr::BOTTOM;
5130
5131 // Adjust memory effects of the runtime call based on input values.
5132 if (!has_wide_mem(_gvn, dst_addr, dst_base)) {
5133 dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5134
5135 flags |= RC_NARROW_MEM; // narrow in memory
5136 }
5137
5138 // Call it. Note that the length argument is not scaled.
5139 make_runtime_call(flags,
5140 OptoRuntime::unsafe_setmemory_Type(),
5141 StubRoutines::unsafe_setmemory(),
5142 "unsafe_setmemory",
5143 dst_type,
5144 dst_addr, size XTOP, byte);
5145
5146 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, MemNode::unordered);
5147
5148 return true;
5149 }
5150
5151 #undef XTOP
5152
5153 //------------------------clone_coping-----------------------------------
5154 // Helper function for inline_native_clone.
5155 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5156 assert(obj_size != nullptr, "");
5157 Node* raw_obj = alloc_obj->in(1);
5158 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5159
5160 AllocateNode* alloc = nullptr;
5161 if (ReduceBulkZeroing &&
5162 // If we are implementing an array clone without knowing its source type
5163 // (can happen when compiling the array-guarded branch of a reflective
5164 // Object.clone() invocation), initialize the array within the allocation.
5165 // This is needed because some GCs (e.g. ZGC) might fall back in this case
5166 // to a runtime clone call that assumes fully initialized source arrays.
5167 (!is_array || obj->get_ptr_type()->isa_aryptr() != nullptr)) {
5168 // We will be completely responsible for initializing this object -
5169 // mark Initialize node as complete.
5170 alloc = AllocateNode::Ideal_allocation(alloc_obj);
5171 // The object was just allocated - there should be no any stores!
5172 guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
5173 // Mark as complete_with_arraycopy so that on AllocateNode
5174 // expansion, we know this AllocateNode is initialized by an array
5175 // copy and a StoreStore barrier exists after the array copy.
5176 alloc->initialization()->set_complete_with_arraycopy();
5177 }
5178
5179 Node* size = _gvn.transform(obj_size);
5180 access_clone(obj, alloc_obj, size, is_array);
5181
5182 // Do not let reads from the cloned object float above the arraycopy.
5183 if (alloc != nullptr) {
5184 // Do not let stores that initialize this object be reordered with
5185 // a subsequent store that would make this object accessible by
5186 // other threads.
5187 // Record what AllocateNode this StoreStore protects so that
5188 // escape analysis can go from the MemBarStoreStoreNode to the
5189 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
5190 // based on the escape status of the AllocateNode.
5191 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
5192 } else {
5193 insert_mem_bar(Op_MemBarCPUOrder);
5194 }
5195 }
5196
5197 //------------------------inline_native_clone----------------------------
5198 // protected native Object java.lang.Object.clone();
5199 //
5200 // Here are the simple edge cases:
5201 // null receiver => normal trap
5202 // virtual and clone was overridden => slow path to out-of-line clone
5203 // not cloneable or finalizer => slow path to out-of-line Object.clone
5204 //
5205 // The general case has two steps, allocation and copying.
5206 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5207 //
5208 // Copying also has two cases, oop arrays and everything else.
5209 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5210 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5211 //
5212 // These steps fold up nicely if and when the cloned object's klass
5213 // can be sharply typed as an object array, a type array, or an instance.
5214 //
5215 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5216 PhiNode* result_val;
5217
5218 // Set the reexecute bit for the interpreter to reexecute
5219 // the bytecode that invokes Object.clone if deoptimization happens.
5220 { PreserveReexecuteState preexecs(this);
5221 jvms()->set_should_reexecute(true);
5222
5223 Node* obj = null_check_receiver();
5224 if (stopped()) return true;
5225
5226 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5227
5228 // If we are going to clone an instance, we need its exact type to
5229 // know the number and types of fields to convert the clone to
5230 // loads/stores. Maybe a speculative type can help us.
5231 if (!obj_type->klass_is_exact() &&
5232 obj_type->speculative_type() != nullptr &&
5233 obj_type->speculative_type()->is_instance_klass()) {
5234 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5235 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5236 !spec_ik->has_injected_fields()) {
5237 if (!obj_type->isa_instptr() ||
5238 obj_type->is_instptr()->instance_klass()->has_subklass()) {
5239 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5240 }
5241 }
5242 }
5243
5244 // Conservatively insert a memory barrier on all memory slices.
5245 // Do not let writes into the original float below the clone.
5246 insert_mem_bar(Op_MemBarCPUOrder);
5247
5248 // paths into result_reg:
5249 enum {
5250 _slow_path = 1, // out-of-line call to clone method (virtual or not)
5251 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
5252 _array_path, // plain array allocation, plus arrayof_long_arraycopy
5253 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
5254 PATH_LIMIT
5255 };
5256 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5257 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5258 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
5259 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5260 record_for_igvn(result_reg);
5261
5262 Node* obj_klass = load_object_klass(obj);
5263 Node* array_obj = obj;
5264 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr, &array_obj);
5265 if (array_ctl != nullptr) {
5266 // It's an array.
5267 PreserveJVMState pjvms(this);
5268 set_control(array_ctl);
5269 Node* obj_length = load_array_length(array_obj);
5270 Node* array_size = nullptr; // Size of the array without object alignment padding.
5271 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5272
5273 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5274 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5275 // If it is an oop array, it requires very special treatment,
5276 // because gc barriers are required when accessing the array.
5277 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5278 if (is_obja != nullptr) {
5279 PreserveJVMState pjvms2(this);
5280 set_control(is_obja);
5281 // Generate a direct call to the right arraycopy function(s).
5282 // Clones are always tightly coupled.
5283 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, array_obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5284 ac->set_clone_oop_array();
5285 Node* n = _gvn.transform(ac);
5286 assert(n == ac, "cannot disappear");
5287 ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5288
5289 result_reg->init_req(_objArray_path, control());
5290 result_val->init_req(_objArray_path, alloc_obj);
5291 result_i_o ->set_req(_objArray_path, i_o());
5292 result_mem ->set_req(_objArray_path, reset_memory());
5293 }
5294 }
5295 // Otherwise, there are no barriers to worry about.
5296 // (We can dispense with card marks if we know the allocation
5297 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
5298 // causes the non-eden paths to take compensating steps to
5299 // simulate a fresh allocation, so that no further
5300 // card marks are required in compiled code to initialize
5301 // the object.)
5302
5303 if (!stopped()) {
5304 copy_to_clone(array_obj, alloc_obj, array_size, true);
5305
5306 // Present the results of the copy.
5307 result_reg->init_req(_array_path, control());
5308 result_val->init_req(_array_path, alloc_obj);
5309 result_i_o ->set_req(_array_path, i_o());
5310 result_mem ->set_req(_array_path, reset_memory());
5311 }
5312 }
5313
5314 // We only go to the instance fast case code if we pass a number of guards.
5315 // The paths which do not pass are accumulated in the slow_region.
5316 RegionNode* slow_region = new RegionNode(1);
5317 record_for_igvn(slow_region);
5318 if (!stopped()) {
5319 // It's an instance (we did array above). Make the slow-path tests.
5320 // If this is a virtual call, we generate a funny guard. We grab
5321 // the vtable entry corresponding to clone() from the target object.
5322 // If the target method which we are calling happens to be the
5323 // Object clone() method, we pass the guard. We do not need this
5324 // guard for non-virtual calls; the caller is known to be the native
5325 // Object clone().
5326 if (is_virtual) {
5327 generate_virtual_guard(obj_klass, slow_region);
5328 }
5329
5330 // The object must be easily cloneable and must not have a finalizer.
5331 // Both of these conditions may be checked in a single test.
5332 // We could optimize the test further, but we don't care.
5333 generate_misc_flags_guard(obj_klass,
5334 // Test both conditions:
5335 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
5336 // Must be cloneable but not finalizer:
5337 KlassFlags::_misc_is_cloneable_fast,
5338 slow_region);
5339 }
5340
5341 if (!stopped()) {
5342 // It's an instance, and it passed the slow-path tests.
5343 PreserveJVMState pjvms(this);
5344 Node* obj_size = nullptr; // Total object size, including object alignment padding.
5345 // Need to deoptimize on exception from allocation since Object.clone intrinsic
5346 // is reexecuted if deoptimization occurs and there could be problems when merging
5347 // exception state between multiple Object.clone versions (reexecute=true vs reexecute=false).
5348 Node* alloc_obj = new_instance(obj_klass, nullptr, &obj_size, /*deoptimize_on_exception=*/true);
5349
5350 copy_to_clone(obj, alloc_obj, obj_size, false);
5351
5352 // Present the results of the slow call.
5353 result_reg->init_req(_instance_path, control());
5354 result_val->init_req(_instance_path, alloc_obj);
5355 result_i_o ->set_req(_instance_path, i_o());
5356 result_mem ->set_req(_instance_path, reset_memory());
5357 }
5358
5359 // Generate code for the slow case. We make a call to clone().
5360 set_control(_gvn.transform(slow_region));
5361 if (!stopped()) {
5362 PreserveJVMState pjvms(this);
5363 CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual, false, true);
5364 // We need to deoptimize on exception (see comment above)
5365 Node* slow_result = set_results_for_java_call(slow_call, false, /* deoptimize */ true);
5366 // this->control() comes from set_results_for_java_call
5367 result_reg->init_req(_slow_path, control());
5368 result_val->init_req(_slow_path, slow_result);
5369 result_i_o ->set_req(_slow_path, i_o());
5370 result_mem ->set_req(_slow_path, reset_memory());
5371 }
5372
5373 // Return the combined state.
5374 set_control( _gvn.transform(result_reg));
5375 set_i_o( _gvn.transform(result_i_o));
5376 set_all_memory( _gvn.transform(result_mem));
5377 } // original reexecute is set back here
5378
5379 set_result(_gvn.transform(result_val));
5380 return true;
5381 }
5382
5383 // If we have a tightly coupled allocation, the arraycopy may take care
5384 // of the array initialization. If one of the guards we insert between
5385 // the allocation and the arraycopy causes a deoptimization, an
5386 // uninitialized array will escape the compiled method. To prevent that
5387 // we set the JVM state for uncommon traps between the allocation and
5388 // the arraycopy to the state before the allocation so, in case of
5389 // deoptimization, we'll reexecute the allocation and the
5390 // initialization.
5391 JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp) {
5392 if (alloc != nullptr) {
5393 ciMethod* trap_method = alloc->jvms()->method();
5394 int trap_bci = alloc->jvms()->bci();
5395
5396 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5397 !C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_null_check)) {
5398 // Make sure there's no store between the allocation and the
5399 // arraycopy otherwise visible side effects could be rexecuted
5400 // in case of deoptimization and cause incorrect execution.
5401 bool no_interfering_store = true;
5402 Node* mem = alloc->in(TypeFunc::Memory);
5403 if (mem->is_MergeMem()) {
5404 for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
5405 Node* n = mms.memory();
5406 if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
5407 assert(n->is_Store(), "what else?");
5408 no_interfering_store = false;
5409 break;
5410 }
5411 }
5412 } else {
5413 for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
5414 Node* n = mms.memory();
5415 if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
5416 assert(n->is_Store(), "what else?");
5417 no_interfering_store = false;
5418 break;
5419 }
5420 }
5421 }
5422
5423 if (no_interfering_store) {
5424 SafePointNode* sfpt = create_safepoint_with_state_before_array_allocation(alloc);
5425
5426 JVMState* saved_jvms = jvms();
5427 saved_reexecute_sp = _reexecute_sp;
5428
5429 set_jvms(sfpt->jvms());
5430 _reexecute_sp = jvms()->sp();
5431
5432 return saved_jvms;
5433 }
5434 }
5435 }
5436 return nullptr;
5437 }
5438
5439 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5440 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5441 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5442 JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5443 uint size = alloc->req();
5444 SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5445 old_jvms->set_map(sfpt);
5446 for (uint i = 0; i < size; i++) {
5447 sfpt->init_req(i, alloc->in(i));
5448 }
5449 // re-push array length for deoptimization
5450 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
5451 old_jvms->set_sp(old_jvms->sp()+1);
5452 old_jvms->set_monoff(old_jvms->monoff()+1);
5453 old_jvms->set_scloff(old_jvms->scloff()+1);
5454 old_jvms->set_endoff(old_jvms->endoff()+1);
5455 old_jvms->set_should_reexecute(true);
5456
5457 sfpt->set_i_o(map()->i_o());
5458 sfpt->set_memory(map()->memory());
5459 sfpt->set_control(map()->control());
5460 return sfpt;
5461 }
5462
5463 // In case of a deoptimization, we restart execution at the
5464 // allocation, allocating a new array. We would leave an uninitialized
5465 // array in the heap that GCs wouldn't expect. Move the allocation
5466 // after the traps so we don't allocate the array if we
5467 // deoptimize. This is possible because tightly_coupled_allocation()
5468 // guarantees there's no observer of the allocated array at this point
5469 // and the control flow is simple enough.
5470 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5471 int saved_reexecute_sp, uint new_idx) {
5472 if (saved_jvms_before_guards != nullptr && !stopped()) {
5473 replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5474
5475 assert(alloc != nullptr, "only with a tightly coupled allocation");
5476 // restore JVM state to the state at the arraycopy
5477 saved_jvms_before_guards->map()->set_control(map()->control());
5478 assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5479 assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5480 // If we've improved the types of some nodes (null check) while
5481 // emitting the guards, propagate them to the current state
5482 map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5483 set_jvms(saved_jvms_before_guards);
5484 _reexecute_sp = saved_reexecute_sp;
5485
5486 // Remove the allocation from above the guards
5487 CallProjections callprojs;
5488 alloc->extract_projections(&callprojs, true);
5489 InitializeNode* init = alloc->initialization();
5490 Node* alloc_mem = alloc->in(TypeFunc::Memory);
5491 C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5492 init->replace_mem_projs_by(alloc_mem, C);
5493
5494 // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5495 // the allocation (i.e. is only valid if the allocation succeeds):
5496 // 1) replace CastIINode with AllocateArrayNode's length here
5497 // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5498 //
5499 // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
5500 // new separate CastIINode (arraycopy guard checks or any array length use between array allocation and ararycopy)
5501 Node* init_control = init->proj_out(TypeFunc::Control);
5502 Node* alloc_length = alloc->Ideal_length();
5503 #ifdef ASSERT
5504 Node* prev_cast = nullptr;
5505 #endif
5506 for (uint i = 0; i < init_control->outcnt(); i++) {
5507 Node* init_out = init_control->raw_out(i);
5508 if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5509 #ifdef ASSERT
5510 if (prev_cast == nullptr) {
5511 prev_cast = init_out;
5512 } else {
5513 if (prev_cast->cmp(*init_out) == false) {
5514 prev_cast->dump();
5515 init_out->dump();
5516 assert(false, "not equal CastIINode");
5517 }
5518 }
5519 #endif
5520 C->gvn_replace_by(init_out, alloc_length);
5521 }
5522 }
5523 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5524
5525 // move the allocation here (after the guards)
5526 _gvn.hash_delete(alloc);
5527 alloc->set_req(TypeFunc::Control, control());
5528 alloc->set_req(TypeFunc::I_O, i_o());
5529 Node *mem = reset_memory();
5530 set_all_memory(mem);
5531 alloc->set_req(TypeFunc::Memory, mem);
5532 set_control(init->proj_out_or_null(TypeFunc::Control));
5533 set_i_o(callprojs.fallthrough_ioproj);
5534
5535 // Update memory as done in GraphKit::set_output_for_allocation()
5536 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5537 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5538 if (ary_type->isa_aryptr() && length_type != nullptr) {
5539 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5540 }
5541 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5542 int elemidx = C->get_alias_index(telemref);
5543 // Need to properly move every memory projection for the Initialize
5544 #ifdef ASSERT
5545 int mark_idx = C->get_alias_index(ary_type->add_offset(oopDesc::mark_offset_in_bytes()));
5546 int klass_idx = C->get_alias_index(ary_type->add_offset(oopDesc::klass_offset_in_bytes()));
5547 #endif
5548 auto move_proj = [&](ProjNode* proj) {
5549 int alias_idx = C->get_alias_index(proj->adr_type());
5550 assert(alias_idx == Compile::AliasIdxRaw ||
5551 alias_idx == elemidx ||
5552 alias_idx == mark_idx ||
5553 alias_idx == klass_idx, "should be raw memory or array element type");
5554 set_memory(proj, alias_idx);
5555 };
5556 init->for_each_proj(move_proj, TypeFunc::Memory);
5557
5558 Node* allocx = _gvn.transform(alloc);
5559 assert(allocx == alloc, "where has the allocation gone?");
5560 assert(dest->is_CheckCastPP(), "not an allocation result?");
5561
5562 _gvn.hash_delete(dest);
5563 dest->set_req(0, control());
5564 Node* destx = _gvn.transform(dest);
5565 assert(destx == dest, "where has the allocation result gone?");
5566
5567 array_ideal_length(alloc, ary_type, true);
5568 }
5569 }
5570
5571 // Unrelated UCTs between the array allocation and the array copy, which are considered safe by tightly_coupled_allocation(),
5572 // need to be replaced by an UCT with a state before the array allocation (including the array length). This is necessary
5573 // because we could hit one of these UCTs (which are executed before the emitted array copy guards and the actual array
5574 // allocation which is moved down in arraycopy_move_allocation_here()). When later resuming execution in the interpreter,
5575 // we would have wrongly skipped the array allocation. To prevent this, we resume execution at the array allocation in
5576 // the interpreter similar to what we are doing for the newly emitted guards for the array copy.
5577 void LibraryCallKit::replace_unrelated_uncommon_traps_with_alloc_state(AllocateArrayNode* alloc,
5578 JVMState* saved_jvms_before_guards) {
5579 if (saved_jvms_before_guards->map()->control()->is_IfProj()) {
5580 // There is at least one unrelated uncommon trap which needs to be replaced.
5581 SafePointNode* sfpt = create_safepoint_with_state_before_array_allocation(alloc);
5582
5583 JVMState* saved_jvms = jvms();
5584 const int saved_reexecute_sp = _reexecute_sp;
5585 set_jvms(sfpt->jvms());
5586 _reexecute_sp = jvms()->sp();
5587
5588 replace_unrelated_uncommon_traps_with_alloc_state(saved_jvms_before_guards);
5589
5590 // Restore state
5591 set_jvms(saved_jvms);
5592 _reexecute_sp = saved_reexecute_sp;
5593 }
5594 }
5595
5596 // Replace the unrelated uncommon traps with new uncommon trap nodes by reusing the action and reason. The new uncommon
5597 // traps will have the state of the array allocation. Let the old uncommon trap nodes die.
5598 void LibraryCallKit::replace_unrelated_uncommon_traps_with_alloc_state(JVMState* saved_jvms_before_guards) {
5599 Node* if_proj = saved_jvms_before_guards->map()->control(); // Start the search right before the newly emitted guards
5600 while (if_proj->is_IfProj()) {
5601 CallStaticJavaNode* uncommon_trap = get_uncommon_trap_from_success_proj(if_proj);
5602 if (uncommon_trap != nullptr) {
5603 create_new_uncommon_trap(uncommon_trap);
5604 }
5605 assert(if_proj->in(0)->is_If(), "must be If");
5606 if_proj = if_proj->in(0)->in(0);
5607 }
5608 assert(if_proj->is_Proj() && if_proj->in(0)->is_Initialize(),
5609 "must have reached control projection of init node");
5610 }
5611
5612 void LibraryCallKit::create_new_uncommon_trap(CallStaticJavaNode* uncommon_trap_call) {
5613 const int trap_request = uncommon_trap_call->uncommon_trap_request();
5614 assert(trap_request != 0, "no valid UCT trap request");
5615 PreserveJVMState pjvms(this);
5616 set_control(uncommon_trap_call->in(0));
5617 uncommon_trap(Deoptimization::trap_request_reason(trap_request),
5618 Deoptimization::trap_request_action(trap_request));
5619 assert(stopped(), "Should be stopped");
5620 _gvn.hash_delete(uncommon_trap_call);
5621 uncommon_trap_call->set_req(0, top()); // not used anymore, kill it
5622 }
5623
5624 // Common checks for array sorting intrinsics arguments.
5625 // Returns `true` if checks passed.
5626 bool LibraryCallKit::check_array_sort_arguments(Node* elementType, Node* obj, BasicType& bt) {
5627 // check address of the class
5628 if (elementType == nullptr || elementType->is_top()) {
5629 return false; // dead path
5630 }
5631 const TypeInstPtr* elem_klass = gvn().type(elementType)->isa_instptr();
5632 if (elem_klass == nullptr) {
5633 return false; // dead path
5634 }
5635 // java_mirror_type() returns non-null for compile-time Class constants only
5636 ciType* elem_type = elem_klass->java_mirror_type();
5637 if (elem_type == nullptr) {
5638 return false;
5639 }
5640 bt = elem_type->basic_type();
5641 // Disable the intrinsic if the CPU does not support SIMD sort
5642 if (!Matcher::supports_simd_sort(bt)) {
5643 return false;
5644 }
5645 // check address of the array
5646 if (obj == nullptr || obj->is_top()) {
5647 return false; // dead path
5648 }
5649 const TypeAryPtr* obj_t = _gvn.type(obj)->isa_aryptr();
5650 if (obj_t == nullptr || obj_t->elem() == Type::BOTTOM) {
5651 return false; // failed input validation
5652 }
5653 return true;
5654 }
5655
5656 //------------------------------inline_array_partition-----------------------
5657 bool LibraryCallKit::inline_array_partition() {
5658 address stubAddr = StubRoutines::select_array_partition_function();
5659 if (stubAddr == nullptr) {
5660 return false; // Intrinsic's stub is not implemented on this platform
5661 }
5662 assert(callee()->signature()->size() == 9, "arrayPartition has 8 parameters (one long)");
5663
5664 // no receiver because it is a static method
5665 Node* elementType = argument(0);
5666 Node* obj = argument(1);
5667 Node* offset = argument(2); // long
5668 Node* fromIndex = argument(4);
5669 Node* toIndex = argument(5);
5670 Node* indexPivot1 = argument(6);
5671 Node* indexPivot2 = argument(7);
5672 // PartitionOperation: argument(8) is ignored
5673
5674 Node* pivotIndices = nullptr;
5675 BasicType bt = T_ILLEGAL;
5676
5677 if (!check_array_sort_arguments(elementType, obj, bt)) {
5678 return false;
5679 }
5680 null_check(obj);
5681 // If obj is dead, only null-path is taken.
5682 if (stopped()) {
5683 return true;
5684 }
5685 // Set the original stack and the reexecute bit for the interpreter to reexecute
5686 // the bytecode that invokes DualPivotQuicksort.partition() if deoptimization happens.
5687 { PreserveReexecuteState preexecs(this);
5688 jvms()->set_should_reexecute(true);
5689
5690 Node* obj_adr = make_unsafe_address(obj, offset);
5691
5692 // create the pivotIndices array of type int and size = 2
5693 Node* size = intcon(2);
5694 Node* klass_node = makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_INT)));
5695 pivotIndices = new_array(klass_node, size, 0); // no arguments to push
5696 AllocateArrayNode* alloc = tightly_coupled_allocation(pivotIndices);
5697 guarantee(alloc != nullptr, "created above");
5698 Node* pivotIndices_adr = basic_plus_adr(pivotIndices, arrayOopDesc::base_offset_in_bytes(T_INT));
5699
5700 // pass the basic type enum to the stub
5701 Node* elemType = intcon(bt);
5702
5703 // Call the stub
5704 const char *stubName = "array_partition_stub";
5705 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::array_partition_Type(),
5706 stubAddr, stubName, TypePtr::BOTTOM,
5707 obj_adr, elemType, fromIndex, toIndex, pivotIndices_adr,
5708 indexPivot1, indexPivot2);
5709
5710 } // original reexecute is set back here
5711
5712 if (!stopped()) {
5713 set_result(pivotIndices);
5714 }
5715
5716 return true;
5717 }
5718
5719
5720 //------------------------------inline_array_sort-----------------------
5721 bool LibraryCallKit::inline_array_sort() {
5722 address stubAddr = StubRoutines::select_arraysort_function();
5723 if (stubAddr == nullptr) {
5724 return false; // Intrinsic's stub is not implemented on this platform
5725 }
5726 assert(callee()->signature()->size() == 7, "arraySort has 6 parameters (one long)");
5727
5728 // no receiver because it is a static method
5729 Node* elementType = argument(0);
5730 Node* obj = argument(1);
5731 Node* offset = argument(2); // long
5732 Node* fromIndex = argument(4);
5733 Node* toIndex = argument(5);
5734 // SortOperation: argument(6) is ignored
5735
5736 BasicType bt = T_ILLEGAL;
5737
5738 if (!check_array_sort_arguments(elementType, obj, bt)) {
5739 return false;
5740 }
5741 null_check(obj);
5742 // If obj is dead, only null-path is taken.
5743 if (stopped()) {
5744 return true;
5745 }
5746 Node* obj_adr = make_unsafe_address(obj, offset);
5747
5748 // pass the basic type enum to the stub
5749 Node* elemType = intcon(bt);
5750
5751 // Call the stub.
5752 const char *stubName = "arraysort_stub";
5753 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::array_sort_Type(),
5754 stubAddr, stubName, TypePtr::BOTTOM,
5755 obj_adr, elemType, fromIndex, toIndex);
5756
5757 return true;
5758 }
5759
5760
5761 //------------------------------inline_arraycopy-----------------------
5762 // public static native void java.lang.System.arraycopy(Object src, int srcPos,
5763 // Object dest, int destPos,
5764 // int length);
5765 bool LibraryCallKit::inline_arraycopy() {
5766 // Get the arguments.
5767 Node* src = argument(0); // type: oop
5768 Node* src_offset = argument(1); // type: int
5769 Node* dest = argument(2); // type: oop
5770 Node* dest_offset = argument(3); // type: int
5771 Node* length = argument(4); // type: int
5772
5773 uint new_idx = C->unique();
5774
5775 // Check for allocation before we add nodes that would confuse
5776 // tightly_coupled_allocation()
5777 AllocateArrayNode* alloc = tightly_coupled_allocation(dest);
5778
5779 int saved_reexecute_sp = -1;
5780 JVMState* saved_jvms_before_guards = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
5781 // See arraycopy_restore_alloc_state() comment
5782 // if alloc == null we don't have to worry about a tightly coupled allocation so we can emit all needed guards
5783 // if saved_jvms_before_guards is not null (then alloc is not null) then we can handle guards and a tightly coupled allocation
5784 // if saved_jvms_before_guards is null and alloc is not null, we can't emit any guards
5785 bool can_emit_guards = (alloc == nullptr || saved_jvms_before_guards != nullptr);
5786
5787 // The following tests must be performed
5788 // (1) src and dest are arrays.
5789 // (2) src and dest arrays must have elements of the same BasicType
5790 // (3) src and dest must not be null.
5791 // (4) src_offset must not be negative.
5792 // (5) dest_offset must not be negative.
5793 // (6) length must not be negative.
5794 // (7) src_offset + length must not exceed length of src.
5795 // (8) dest_offset + length must not exceed length of dest.
5796 // (9) each element of an oop array must be assignable
5797
5798 // (3) src and dest must not be null.
5799 // always do this here because we need the JVM state for uncommon traps
5800 Node* null_ctl = top();
5801 src = saved_jvms_before_guards != nullptr ? null_check_oop(src, &null_ctl, true, true) : null_check(src, T_ARRAY);
5802 assert(null_ctl->is_top(), "no null control here");
5803 dest = null_check(dest, T_ARRAY);
5804
5805 if (!can_emit_guards) {
5806 // if saved_jvms_before_guards is null and alloc is not null, we don't emit any
5807 // guards but the arraycopy node could still take advantage of a
5808 // tightly allocated allocation. tightly_coupled_allocation() is
5809 // called again to make sure it takes the null check above into
5810 // account: the null check is mandatory and if it caused an
5811 // uncommon trap to be emitted then the allocation can't be
5812 // considered tightly coupled in this context.
5813 alloc = tightly_coupled_allocation(dest);
5814 }
5815
5816 bool validated = false;
5817
5818 const Type* src_type = _gvn.type(src);
5819 const Type* dest_type = _gvn.type(dest);
5820 const TypeAryPtr* top_src = src_type->isa_aryptr();
5821 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
5822
5823 // Do we have the type of src?
5824 bool has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5825 // Do we have the type of dest?
5826 bool has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5827 // Is the type for src from speculation?
5828 bool src_spec = false;
5829 // Is the type for dest from speculation?
5830 bool dest_spec = false;
5831
5832 if ((!has_src || !has_dest) && can_emit_guards) {
5833 // We don't have sufficient type information, let's see if
5834 // speculative types can help. We need to have types for both src
5835 // and dest so that it pays off.
5836
5837 // Do we already have or could we have type information for src
5838 bool could_have_src = has_src;
5839 // Do we already have or could we have type information for dest
5840 bool could_have_dest = has_dest;
5841
5842 ciKlass* src_k = nullptr;
5843 if (!has_src) {
5844 src_k = src_type->speculative_type_not_null();
5845 if (src_k != nullptr && src_k->is_array_klass()) {
5846 could_have_src = true;
5847 }
5848 }
5849
5850 ciKlass* dest_k = nullptr;
5851 if (!has_dest) {
5852 dest_k = dest_type->speculative_type_not_null();
5853 if (dest_k != nullptr && dest_k->is_array_klass()) {
5854 could_have_dest = true;
5855 }
5856 }
5857
5858 if (could_have_src && could_have_dest) {
5859 // This is going to pay off so emit the required guards
5860 if (!has_src) {
5861 src = maybe_cast_profiled_obj(src, src_k, true);
5862 src_type = _gvn.type(src);
5863 top_src = src_type->isa_aryptr();
5864 has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5865 src_spec = true;
5866 }
5867 if (!has_dest) {
5868 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5869 dest_type = _gvn.type(dest);
5870 top_dest = dest_type->isa_aryptr();
5871 has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5872 dest_spec = true;
5873 }
5874 }
5875 }
5876
5877 if (has_src && has_dest && can_emit_guards) {
5878 BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5879 BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5880 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5881 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5882
5883 if (src_elem == dest_elem && src_elem == T_OBJECT) {
5884 // If both arrays are object arrays then having the exact types
5885 // for both will remove the need for a subtype check at runtime
5886 // before the call and may make it possible to pick a faster copy
5887 // routine (without a subtype check on every element)
5888 // Do we have the exact type of src?
5889 bool could_have_src = src_spec;
5890 // Do we have the exact type of dest?
5891 bool could_have_dest = dest_spec;
5892 ciKlass* src_k = nullptr;
5893 ciKlass* dest_k = nullptr;
5894 if (!src_spec) {
5895 src_k = src_type->speculative_type_not_null();
5896 if (src_k != nullptr && src_k->is_array_klass()) {
5897 could_have_src = true;
5898 }
5899 }
5900 if (!dest_spec) {
5901 dest_k = dest_type->speculative_type_not_null();
5902 if (dest_k != nullptr && dest_k->is_array_klass()) {
5903 could_have_dest = true;
5904 }
5905 }
5906 if (could_have_src && could_have_dest) {
5907 // If we can have both exact types, emit the missing guards
5908 if (could_have_src && !src_spec) {
5909 src = maybe_cast_profiled_obj(src, src_k, true);
5910 }
5911 if (could_have_dest && !dest_spec) {
5912 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5913 }
5914 }
5915 }
5916 }
5917
5918 ciMethod* trap_method = method();
5919 int trap_bci = bci();
5920 if (saved_jvms_before_guards != nullptr) {
5921 trap_method = alloc->jvms()->method();
5922 trap_bci = alloc->jvms()->bci();
5923 }
5924
5925 bool negative_length_guard_generated = false;
5926
5927 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5928 can_emit_guards &&
5929 !src->is_top() && !dest->is_top()) {
5930 // validate arguments: enables transformation the ArrayCopyNode
5931 validated = true;
5932
5933 RegionNode* slow_region = new RegionNode(1);
5934 record_for_igvn(slow_region);
5935
5936 // (1) src and dest are arrays.
5937 generate_non_array_guard(load_object_klass(src), slow_region, &src);
5938 generate_non_array_guard(load_object_klass(dest), slow_region, &dest);
5939
5940 // (2) src and dest arrays must have elements of the same BasicType
5941 // done at macro expansion or at Ideal transformation time
5942
5943 // (4) src_offset must not be negative.
5944 generate_negative_guard(src_offset, slow_region);
5945
5946 // (5) dest_offset must not be negative.
5947 generate_negative_guard(dest_offset, slow_region);
5948
5949 // (7) src_offset + length must not exceed length of src.
5950 generate_limit_guard(src_offset, length,
5951 load_array_length(src),
5952 slow_region);
5953
5954 // (8) dest_offset + length must not exceed length of dest.
5955 generate_limit_guard(dest_offset, length,
5956 load_array_length(dest),
5957 slow_region);
5958
5959 // (6) length must not be negative.
5960 // This is also checked in generate_arraycopy() during macro expansion, but
5961 // we also have to check it here for the case where the ArrayCopyNode will
5962 // be eliminated by Escape Analysis.
5963 if (EliminateAllocations) {
5964 generate_negative_guard(length, slow_region);
5965 negative_length_guard_generated = true;
5966 }
5967
5968 // (9) each element of an oop array must be assignable
5969 Node* dest_klass = load_object_klass(dest);
5970 if (src != dest) {
5971 Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
5972
5973 if (not_subtype_ctrl != top()) {
5974 PreserveJVMState pjvms(this);
5975 set_control(not_subtype_ctrl);
5976 uncommon_trap(Deoptimization::Reason_intrinsic,
5977 Deoptimization::Action_make_not_entrant);
5978 assert(stopped(), "Should be stopped");
5979 }
5980 }
5981 {
5982 PreserveJVMState pjvms(this);
5983 set_control(_gvn.transform(slow_region));
5984 uncommon_trap(Deoptimization::Reason_intrinsic,
5985 Deoptimization::Action_make_not_entrant);
5986 assert(stopped(), "Should be stopped");
5987 }
5988
5989 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5990 const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
5991 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5992 arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
5993 }
5994
5995 if (stopped()) {
5996 return true;
5997 }
5998
5999 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6000 // Create LoadRange and LoadKlass nodes for use during macro expansion here
6001 // so the compiler has a chance to eliminate them: during macro expansion,
6002 // we have to set their control (CastPP nodes are eliminated).
6003 load_object_klass(src), load_object_klass(dest),
6004 load_array_length(src), load_array_length(dest));
6005
6006 ac->set_arraycopy(validated);
6007
6008 Node* n = _gvn.transform(ac);
6009 if (n == ac) {
6010 ac->connect_outputs(this);
6011 } else {
6012 assert(validated, "shouldn't transform if all arguments not validated");
6013 set_all_memory(n);
6014 }
6015 clear_upper_avx();
6016
6017
6018 return true;
6019 }
6020
6021
6022 // Helper function which determines if an arraycopy immediately follows
6023 // an allocation, with no intervening tests or other escapes for the object.
6024 AllocateArrayNode*
6025 LibraryCallKit::tightly_coupled_allocation(Node* ptr) {
6026 if (stopped()) return nullptr; // no fast path
6027 if (!C->do_aliasing()) return nullptr; // no MergeMems around
6028
6029 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr);
6030 if (alloc == nullptr) return nullptr;
6031
6032 Node* rawmem = memory(Compile::AliasIdxRaw);
6033 // Is the allocation's memory state untouched?
6034 if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
6035 // Bail out if there have been raw-memory effects since the allocation.
6036 // (Example: There might have been a call or safepoint.)
6037 return nullptr;
6038 }
6039 rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
6040 if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
6041 return nullptr;
6042 }
6043
6044 // There must be no unexpected observers of this allocation.
6045 for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
6046 Node* obs = ptr->fast_out(i);
6047 if (obs != this->map()) {
6048 return nullptr;
6049 }
6050 }
6051
6052 // This arraycopy must unconditionally follow the allocation of the ptr.
6053 Node* alloc_ctl = ptr->in(0);
6054 Node* ctl = control();
6055 while (ctl != alloc_ctl) {
6056 // There may be guards which feed into the slow_region.
6057 // Any other control flow means that we might not get a chance
6058 // to finish initializing the allocated object.
6059 // Various low-level checks bottom out in uncommon traps. These
6060 // are considered safe since we've already checked above that
6061 // there is no unexpected observer of this allocation.
6062 if (get_uncommon_trap_from_success_proj(ctl) != nullptr) {
6063 assert(ctl->in(0)->is_If(), "must be If");
6064 ctl = ctl->in(0)->in(0);
6065 } else {
6066 return nullptr;
6067 }
6068 }
6069
6070 // If we get this far, we have an allocation which immediately
6071 // precedes the arraycopy, and we can take over zeroing the new object.
6072 // The arraycopy will finish the initialization, and provide
6073 // a new control state to which we will anchor the destination pointer.
6074
6075 return alloc;
6076 }
6077
6078 CallStaticJavaNode* LibraryCallKit::get_uncommon_trap_from_success_proj(Node* node) {
6079 if (node->is_IfProj()) {
6080 IfProjNode* other_proj = node->as_IfProj()->other_if_proj();
6081 for (DUIterator_Fast jmax, j = other_proj->fast_outs(jmax); j < jmax; j++) {
6082 Node* obs = other_proj->fast_out(j);
6083 if (obs->in(0) == other_proj && obs->is_CallStaticJava() &&
6084 (obs->as_CallStaticJava()->entry_point() == OptoRuntime::uncommon_trap_blob()->entry_point())) {
6085 return obs->as_CallStaticJava();
6086 }
6087 }
6088 }
6089 return nullptr;
6090 }
6091
6092 //-------------inline_encodeISOArray-----------------------------------
6093 // int sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
6094 // int java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
6095 // int java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
6096 // encode char[] to byte[] in ISO_8859_1 or ASCII
6097 bool LibraryCallKit::inline_encodeISOArray(bool ascii) {
6098 assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
6099 // no receiver since it is static method
6100 Node *src = argument(0);
6101 Node *src_offset = argument(1);
6102 Node *dst = argument(2);
6103 Node *dst_offset = argument(3);
6104 Node *length = argument(4);
6105
6106 // Cast source & target arrays to not-null
6107 src = must_be_not_null(src, true);
6108 dst = must_be_not_null(dst, true);
6109 if (stopped()) {
6110 return true;
6111 }
6112
6113 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
6114 const TypeAryPtr* dst_type = dst->Value(&_gvn)->isa_aryptr();
6115 if (src_type == nullptr || src_type->elem() == Type::BOTTOM ||
6116 dst_type == nullptr || dst_type->elem() == Type::BOTTOM) {
6117 // failed array check
6118 return false;
6119 }
6120
6121 // Figure out the size and type of the elements we will be copying.
6122 BasicType src_elem = src_type->elem()->array_element_basic_type();
6123 BasicType dst_elem = dst_type->elem()->array_element_basic_type();
6124 if (!((src_elem == T_CHAR) || (src_elem== T_BYTE)) || dst_elem != T_BYTE) {
6125 return false;
6126 }
6127
6128 // Check source & target bounds
6129 RegionNode* bailout = create_bailout();
6130 generate_string_range_check(src, src_offset, length, src_elem == T_BYTE, bailout);
6131 generate_string_range_check(dst, dst_offset, length, false, bailout);
6132 if (check_bailout(bailout)) {
6133 return true;
6134 }
6135
6136 Node* src_start = array_element_address(src, src_offset, T_CHAR);
6137 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
6138 // 'src_start' points to src array + scaled offset
6139 // 'dst_start' points to dst array + scaled offset
6140
6141 const TypeAryPtr* mtype = TypeAryPtr::BYTES;
6142 Node* enc = new EncodeISOArrayNode(control(), memory(mtype), src_start, dst_start, length, ascii);
6143 enc = _gvn.transform(enc);
6144 Node* res_mem = _gvn.transform(new SCMemProjNode(enc));
6145 set_memory(res_mem, mtype);
6146 set_result(enc);
6147 clear_upper_avx();
6148
6149 return true;
6150 }
6151
6152 //-------------inline_multiplyToLen-----------------------------------
6153 bool LibraryCallKit::inline_multiplyToLen() {
6154 assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");
6155
6156 address stubAddr = StubRoutines::multiplyToLen();
6157 if (stubAddr == nullptr) {
6158 return false; // Intrinsic's stub is not implemented on this platform
6159 }
6160 const char* stubName = "multiplyToLen";
6161
6162 assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");
6163
6164 // no receiver because it is a static method
6165 Node* x = argument(0);
6166 Node* xlen = argument(1);
6167 Node* y = argument(2);
6168 Node* ylen = argument(3);
6169 Node* z = argument(4);
6170
6171 x = must_be_not_null(x, true);
6172 y = must_be_not_null(y, true);
6173
6174 const TypeAryPtr* x_type = x->Value(&_gvn)->isa_aryptr();
6175 const TypeAryPtr* y_type = y->Value(&_gvn)->isa_aryptr();
6176 if (x_type == nullptr || x_type->elem() == Type::BOTTOM ||
6177 y_type == nullptr || y_type->elem() == Type::BOTTOM) {
6178 // failed array check
6179 return false;
6180 }
6181
6182 BasicType x_elem = x_type->elem()->array_element_basic_type();
6183 BasicType y_elem = y_type->elem()->array_element_basic_type();
6184 if (x_elem != T_INT || y_elem != T_INT) {
6185 return false;
6186 }
6187
6188 Node* x_start = array_element_address(x, intcon(0), x_elem);
6189 Node* y_start = array_element_address(y, intcon(0), y_elem);
6190 // 'x_start' points to x array + scaled xlen
6191 // 'y_start' points to y array + scaled ylen
6192
6193 Node* z_start = array_element_address(z, intcon(0), T_INT);
6194
6195 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
6196 OptoRuntime::multiplyToLen_Type(),
6197 stubAddr, stubName, TypePtr::BOTTOM,
6198 x_start, xlen, y_start, ylen, z_start);
6199
6200 C->set_has_split_ifs(true); // Has chance for split-if optimization
6201 set_result(z);
6202 return true;
6203 }
6204
6205 //-------------inline_squareToLen------------------------------------
6206 bool LibraryCallKit::inline_squareToLen() {
6207 assert(UseSquareToLenIntrinsic, "not implemented on this platform");
6208
6209 address stubAddr = StubRoutines::squareToLen();
6210 if (stubAddr == nullptr) {
6211 return false; // Intrinsic's stub is not implemented on this platform
6212 }
6213 const char* stubName = "squareToLen";
6214
6215 assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");
6216
6217 Node* x = argument(0);
6218 Node* len = argument(1);
6219 Node* z = argument(2);
6220 Node* zlen = argument(3);
6221
6222 x = must_be_not_null(x, true);
6223 z = must_be_not_null(z, true);
6224
6225 const TypeAryPtr* x_type = x->Value(&_gvn)->isa_aryptr();
6226 const TypeAryPtr* z_type = z->Value(&_gvn)->isa_aryptr();
6227 if (x_type == nullptr || x_type->elem() == Type::BOTTOM ||
6228 z_type == nullptr || z_type->elem() == Type::BOTTOM) {
6229 // failed array check
6230 return false;
6231 }
6232
6233 BasicType x_elem = x_type->elem()->array_element_basic_type();
6234 BasicType z_elem = z_type->elem()->array_element_basic_type();
6235 if (x_elem != T_INT || z_elem != T_INT) {
6236 return false;
6237 }
6238
6239
6240 Node* x_start = array_element_address(x, intcon(0), x_elem);
6241 Node* z_start = array_element_address(z, intcon(0), z_elem);
6242
6243 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
6244 OptoRuntime::squareToLen_Type(),
6245 stubAddr, stubName, TypePtr::BOTTOM,
6246 x_start, len, z_start, zlen);
6247
6248 set_result(z);
6249 return true;
6250 }
6251
6252 //-------------inline_mulAdd------------------------------------------
6253 bool LibraryCallKit::inline_mulAdd() {
6254 assert(UseMulAddIntrinsic, "not implemented on this platform");
6255
6256 address stubAddr = StubRoutines::mulAdd();
6257 if (stubAddr == nullptr) {
6258 return false; // Intrinsic's stub is not implemented on this platform
6259 }
6260 const char* stubName = "mulAdd";
6261
6262 assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");
6263
6264 Node* out = argument(0);
6265 Node* in = argument(1);
6266 Node* offset = argument(2);
6267 Node* len = argument(3);
6268 Node* k = argument(4);
6269
6270 in = must_be_not_null(in, true);
6271 out = must_be_not_null(out, true);
6272
6273 const TypeAryPtr* out_type = out->Value(&_gvn)->isa_aryptr();
6274 const TypeAryPtr* in_type = in->Value(&_gvn)->isa_aryptr();
6275 if (out_type == nullptr || out_type->elem() == Type::BOTTOM ||
6276 in_type == nullptr || in_type->elem() == Type::BOTTOM) {
6277 // failed array check
6278 return false;
6279 }
6280
6281 BasicType out_elem = out_type->elem()->array_element_basic_type();
6282 BasicType in_elem = in_type->elem()->array_element_basic_type();
6283 if (out_elem != T_INT || in_elem != T_INT) {
6284 return false;
6285 }
6286
6287 Node* outlen = load_array_length(out);
6288 Node* new_offset = _gvn.transform(new SubINode(outlen, offset));
6289 Node* out_start = array_element_address(out, intcon(0), out_elem);
6290 Node* in_start = array_element_address(in, intcon(0), in_elem);
6291
6292 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
6293 OptoRuntime::mulAdd_Type(),
6294 stubAddr, stubName, TypePtr::BOTTOM,
6295 out_start,in_start, new_offset, len, k);
6296 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6297 set_result(result);
6298 return true;
6299 }
6300
6301 //-------------inline_montgomeryMultiply-----------------------------------
6302 bool LibraryCallKit::inline_montgomeryMultiply() {
6303 address stubAddr = StubRoutines::montgomeryMultiply();
6304 if (stubAddr == nullptr) {
6305 return false; // Intrinsic's stub is not implemented on this platform
6306 }
6307
6308 assert(UseMontgomeryMultiplyIntrinsic, "not implemented on this platform");
6309 const char* stubName = "montgomery_multiply";
6310
6311 assert(callee()->signature()->size() == 7, "montgomeryMultiply has 7 parameters");
6312
6313 Node* a = argument(0);
6314 Node* b = argument(1);
6315 Node* n = argument(2);
6316 Node* len = argument(3);
6317 Node* inv = argument(4);
6318 Node* m = argument(6);
6319
6320 const TypeAryPtr* a_type = a->Value(&_gvn)->isa_aryptr();
6321 const TypeAryPtr* b_type = b->Value(&_gvn)->isa_aryptr();
6322 const TypeAryPtr* n_type = n->Value(&_gvn)->isa_aryptr();
6323 const TypeAryPtr* m_type = m->Value(&_gvn)->isa_aryptr();
6324 if (a_type == nullptr || a_type->elem() == Type::BOTTOM ||
6325 b_type == nullptr || b_type->elem() == Type::BOTTOM ||
6326 n_type == nullptr || n_type->elem() == Type::BOTTOM ||
6327 m_type == nullptr || m_type->elem() == Type::BOTTOM) {
6328 // failed array check
6329 return false;
6330 }
6331
6332 BasicType a_elem = a_type->elem()->array_element_basic_type();
6333 BasicType b_elem = b_type->elem()->array_element_basic_type();
6334 BasicType n_elem = n_type->elem()->array_element_basic_type();
6335 BasicType m_elem = m_type->elem()->array_element_basic_type();
6336 if (a_elem != T_INT || b_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
6337 return false;
6338 }
6339
6340 // Make the call
6341 {
6342 Node* a_start = array_element_address(a, intcon(0), a_elem);
6343 Node* b_start = array_element_address(b, intcon(0), b_elem);
6344 Node* n_start = array_element_address(n, intcon(0), n_elem);
6345 Node* m_start = array_element_address(m, intcon(0), m_elem);
6346
6347 Node* call = make_runtime_call(RC_LEAF,
6348 OptoRuntime::montgomeryMultiply_Type(),
6349 stubAddr, stubName, TypePtr::BOTTOM,
6350 a_start, b_start, n_start, len, inv, top(),
6351 m_start);
6352 set_result(m);
6353 }
6354
6355 return true;
6356 }
6357
6358 bool LibraryCallKit::inline_montgomerySquare() {
6359 address stubAddr = StubRoutines::montgomerySquare();
6360 if (stubAddr == nullptr) {
6361 return false; // Intrinsic's stub is not implemented on this platform
6362 }
6363
6364 assert(UseMontgomerySquareIntrinsic, "not implemented on this platform");
6365 const char* stubName = "montgomery_square";
6366
6367 assert(callee()->signature()->size() == 6, "montgomerySquare has 6 parameters");
6368
6369 Node* a = argument(0);
6370 Node* n = argument(1);
6371 Node* len = argument(2);
6372 Node* inv = argument(3);
6373 Node* m = argument(5);
6374
6375 const TypeAryPtr* a_type = a->Value(&_gvn)->isa_aryptr();
6376 const TypeAryPtr* n_type = n->Value(&_gvn)->isa_aryptr();
6377 const TypeAryPtr* m_type = m->Value(&_gvn)->isa_aryptr();
6378 if (a_type == nullptr || a_type->elem() == Type::BOTTOM ||
6379 n_type == nullptr || n_type->elem() == Type::BOTTOM ||
6380 m_type == nullptr || m_type->elem() == Type::BOTTOM) {
6381 // failed array check
6382 return false;
6383 }
6384
6385 BasicType a_elem = a_type->elem()->array_element_basic_type();
6386 BasicType n_elem = n_type->elem()->array_element_basic_type();
6387 BasicType m_elem = m_type->elem()->array_element_basic_type();
6388 if (a_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
6389 return false;
6390 }
6391
6392 // Make the call
6393 {
6394 Node* a_start = array_element_address(a, intcon(0), a_elem);
6395 Node* n_start = array_element_address(n, intcon(0), n_elem);
6396 Node* m_start = array_element_address(m, intcon(0), m_elem);
6397
6398 Node* call = make_runtime_call(RC_LEAF,
6399 OptoRuntime::montgomerySquare_Type(),
6400 stubAddr, stubName, TypePtr::BOTTOM,
6401 a_start, n_start, len, inv, top(),
6402 m_start);
6403 set_result(m);
6404 }
6405
6406 return true;
6407 }
6408
6409 bool LibraryCallKit::inline_bigIntegerShift(bool isRightShift) {
6410 address stubAddr = nullptr;
6411 const char* stubName = nullptr;
6412
6413 stubAddr = isRightShift? StubRoutines::bigIntegerRightShift(): StubRoutines::bigIntegerLeftShift();
6414 if (stubAddr == nullptr) {
6415 return false; // Intrinsic's stub is not implemented on this platform
6416 }
6417
6418 stubName = isRightShift? "bigIntegerRightShiftWorker" : "bigIntegerLeftShiftWorker";
6419
6420 assert(callee()->signature()->size() == 5, "expected 5 arguments");
6421
6422 Node* newArr = argument(0);
6423 Node* oldArr = argument(1);
6424 Node* newIdx = argument(2);
6425 Node* shiftCount = argument(3);
6426 Node* numIter = argument(4);
6427
6428 const TypeAryPtr* newArr_type = newArr->Value(&_gvn)->isa_aryptr();
6429 const TypeAryPtr* oldArr_type = oldArr->Value(&_gvn)->isa_aryptr();
6430 if (newArr_type == nullptr || newArr_type->elem() == Type::BOTTOM ||
6431 oldArr_type == nullptr || oldArr_type->elem() == Type::BOTTOM) {
6432 return false;
6433 }
6434
6435 BasicType newArr_elem = newArr_type->elem()->array_element_basic_type();
6436 BasicType oldArr_elem = oldArr_type->elem()->array_element_basic_type();
6437 if (newArr_elem != T_INT || oldArr_elem != T_INT) {
6438 return false;
6439 }
6440
6441 // Make the call
6442 {
6443 Node* newArr_start = array_element_address(newArr, intcon(0), newArr_elem);
6444 Node* oldArr_start = array_element_address(oldArr, intcon(0), oldArr_elem);
6445
6446 Node* call = make_runtime_call(RC_LEAF,
6447 OptoRuntime::bigIntegerShift_Type(),
6448 stubAddr,
6449 stubName,
6450 TypePtr::BOTTOM,
6451 newArr_start,
6452 oldArr_start,
6453 newIdx,
6454 shiftCount,
6455 numIter);
6456 }
6457
6458 return true;
6459 }
6460
6461 //-------------inline_vectorizedMismatch------------------------------
6462 bool LibraryCallKit::inline_vectorizedMismatch() {
6463 assert(UseVectorizedMismatchIntrinsic, "not implemented on this platform");
6464
6465 assert(callee()->signature()->size() == 8, "vectorizedMismatch has 6 parameters");
6466 Node* obja = argument(0); // Object
6467 Node* aoffset = argument(1); // long
6468 Node* objb = argument(3); // Object
6469 Node* boffset = argument(4); // long
6470 Node* length = argument(6); // int
6471 Node* scale = argument(7); // int
6472
6473 const TypeAryPtr* obja_t = _gvn.type(obja)->isa_aryptr();
6474 const TypeAryPtr* objb_t = _gvn.type(objb)->isa_aryptr();
6475 if (obja_t == nullptr || obja_t->elem() == Type::BOTTOM ||
6476 objb_t == nullptr || objb_t->elem() == Type::BOTTOM ||
6477 scale == top()) {
6478 return false; // failed input validation
6479 }
6480
6481 Node* obja_adr = make_unsafe_address(obja, aoffset);
6482 Node* objb_adr = make_unsafe_address(objb, boffset);
6483
6484 // Partial inlining handling for inputs smaller than ArrayOperationPartialInlineSize bytes in size.
6485 //
6486 // inline_limit = ArrayOperationPartialInlineSize / element_size;
6487 // if (length <= inline_limit) {
6488 // inline_path:
6489 // vmask = VectorMaskGen length
6490 // vload1 = LoadVectorMasked obja, vmask
6491 // vload2 = LoadVectorMasked objb, vmask
6492 // result1 = VectorCmpMasked vload1, vload2, vmask
6493 // } else {
6494 // call_stub_path:
6495 // result2 = call vectorizedMismatch_stub(obja, objb, length, scale)
6496 // }
6497 // exit_block:
6498 // return Phi(result1, result2);
6499 //
6500 enum { inline_path = 1, // input is small enough to process it all at once
6501 stub_path = 2, // input is too large; call into the VM
6502 PATH_LIMIT = 3
6503 };
6504
6505 Node* exit_block = new RegionNode(PATH_LIMIT);
6506 Node* result_phi = new PhiNode(exit_block, TypeInt::INT);
6507 Node* memory_phi = new PhiNode(exit_block, Type::MEMORY, TypePtr::BOTTOM);
6508
6509 Node* call_stub_path = control();
6510
6511 BasicType elem_bt = T_ILLEGAL;
6512
6513 const TypeInt* scale_t = _gvn.type(scale)->is_int();
6514 if (scale_t->is_con()) {
6515 switch (scale_t->get_con()) {
6516 case 0: elem_bt = T_BYTE; break;
6517 case 1: elem_bt = T_SHORT; break;
6518 case 2: elem_bt = T_INT; break;
6519 case 3: elem_bt = T_LONG; break;
6520
6521 default: elem_bt = T_ILLEGAL; break; // not supported
6522 }
6523 }
6524
6525 int inline_limit = 0;
6526 bool do_partial_inline = false;
6527
6528 if (elem_bt != T_ILLEGAL && ArrayOperationPartialInlineSize > 0) {
6529 inline_limit = ArrayOperationPartialInlineSize / type2aelembytes(elem_bt);
6530 do_partial_inline = inline_limit >= 16;
6531 }
6532
6533 if (do_partial_inline) {
6534 assert(elem_bt != T_ILLEGAL, "sanity");
6535
6536 if (Matcher::match_rule_supported_vector(Op_VectorMaskGen, inline_limit, elem_bt) &&
6537 Matcher::match_rule_supported_vector(Op_LoadVectorMasked, inline_limit, elem_bt) &&
6538 Matcher::match_rule_supported_vector(Op_VectorCmpMasked, inline_limit, elem_bt)) {
6539
6540 const TypeVect* vt = TypeVect::make(elem_bt, inline_limit);
6541 Node* cmp_length = _gvn.transform(new CmpINode(length, intcon(inline_limit)));
6542 Node* bol_gt = _gvn.transform(new BoolNode(cmp_length, BoolTest::gt));
6543
6544 call_stub_path = generate_guard(bol_gt, nullptr, PROB_MIN);
6545
6546 if (!stopped()) {
6547 Node* casted_length = _gvn.transform(new CastIINode(control(), length, TypeInt::make(0, inline_limit, Type::WidenMin)));
6548
6549 const TypePtr* obja_adr_t = _gvn.type(obja_adr)->isa_ptr();
6550 const TypePtr* objb_adr_t = _gvn.type(objb_adr)->isa_ptr();
6551 Node* obja_adr_mem = memory(C->get_alias_index(obja_adr_t));
6552 Node* objb_adr_mem = memory(C->get_alias_index(objb_adr_t));
6553
6554 Node* vmask = _gvn.transform(VectorMaskGenNode::make(ConvI2X(casted_length), elem_bt));
6555 Node* vload_obja = _gvn.transform(new LoadVectorMaskedNode(control(), obja_adr_mem, obja_adr, obja_adr_t, vt, vmask));
6556 Node* vload_objb = _gvn.transform(new LoadVectorMaskedNode(control(), objb_adr_mem, objb_adr, objb_adr_t, vt, vmask));
6557 Node* result = _gvn.transform(new VectorCmpMaskedNode(vload_obja, vload_objb, vmask, TypeInt::INT));
6558
6559 exit_block->init_req(inline_path, control());
6560 memory_phi->init_req(inline_path, map()->memory());
6561 result_phi->init_req(inline_path, result);
6562
6563 C->set_max_vector_size(MAX2((uint)ArrayOperationPartialInlineSize, C->max_vector_size()));
6564 clear_upper_avx();
6565 }
6566 }
6567 }
6568
6569 if (call_stub_path != nullptr) {
6570 set_control(call_stub_path);
6571
6572 Node* call = make_runtime_call(RC_LEAF,
6573 OptoRuntime::vectorizedMismatch_Type(),
6574 StubRoutines::vectorizedMismatch(), "vectorizedMismatch", TypePtr::BOTTOM,
6575 obja_adr, objb_adr, length, scale);
6576
6577 exit_block->init_req(stub_path, control());
6578 memory_phi->init_req(stub_path, map()->memory());
6579 result_phi->init_req(stub_path, _gvn.transform(new ProjNode(call, TypeFunc::Parms)));
6580 }
6581
6582 exit_block = _gvn.transform(exit_block);
6583 memory_phi = _gvn.transform(memory_phi);
6584 result_phi = _gvn.transform(result_phi);
6585
6586 record_for_igvn(exit_block);
6587 record_for_igvn(memory_phi);
6588 record_for_igvn(result_phi);
6589
6590 set_control(exit_block);
6591 set_all_memory(memory_phi);
6592 set_result(result_phi);
6593
6594 return true;
6595 }
6596
6597 //------------------------------inline_vectorizedHashcode----------------------------
6598 bool LibraryCallKit::inline_vectorizedHashCode() {
6599 assert(UseVectorizedHashCodeIntrinsic, "not implemented on this platform");
6600
6601 assert(callee()->signature()->size() == 5, "vectorizedHashCode has 5 parameters");
6602 Node* array = argument(0);
6603 Node* offset = argument(1);
6604 Node* length = argument(2);
6605 Node* initialValue = argument(3);
6606 Node* basic_type = argument(4);
6607
6608 if (basic_type == top()) {
6609 return false; // failed input validation
6610 }
6611
6612 const TypeInt* basic_type_t = _gvn.type(basic_type)->is_int();
6613 if (!basic_type_t->is_con()) {
6614 return false; // Only intrinsify if mode argument is constant
6615 }
6616
6617 array = must_be_not_null(array, true);
6618
6619 BasicType bt = (BasicType)basic_type_t->get_con();
6620
6621 // Resolve address of first element
6622 Node* array_start = array_element_address(array, offset, bt);
6623
6624 set_result(_gvn.transform(new VectorizedHashCodeNode(control(), memory(TypeAryPtr::get_array_body_type(bt)),
6625 array_start, length, initialValue, basic_type)));
6626 clear_upper_avx();
6627
6628 return true;
6629 }
6630
6631 /**
6632 * Calculate CRC32 for byte.
6633 * int java.util.zip.CRC32.update(int crc, int b)
6634 */
6635 bool LibraryCallKit::inline_updateCRC32() {
6636 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
6637 assert(callee()->signature()->size() == 2, "update has 2 parameters");
6638 // no receiver since it is static method
6639 Node* crc = argument(0); // type: int
6640 Node* b = argument(1); // type: int
6641
6642 /*
6643 * int c = ~ crc;
6644 * b = timesXtoThe32[(b ^ c) & 0xFF];
6645 * b = b ^ (c >>> 8);
6646 * crc = ~b;
6647 */
6648
6649 Node* M1 = intcon(-1);
6650 crc = _gvn.transform(new XorINode(crc, M1));
6651 Node* result = _gvn.transform(new XorINode(crc, b));
6652 result = _gvn.transform(new AndINode(result, intcon(0xFF)));
6653
6654 Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
6655 Node* offset = _gvn.transform(new LShiftINode(result, intcon(0x2)));
6656 Node* adr = off_heap_plus_addr(base, ConvI2X(offset));
6657 result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
6658
6659 crc = _gvn.transform(new URShiftINode(crc, intcon(8)));
6660 result = _gvn.transform(new XorINode(crc, result));
6661 result = _gvn.transform(new XorINode(result, M1));
6662 set_result(result);
6663 return true;
6664 }
6665
6666 /**
6667 * Calculate CRC32 for byte[] array.
6668 * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len)
6669 */
6670 bool LibraryCallKit::inline_updateBytesCRC32() {
6671 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
6672 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
6673 // no receiver since it is static method
6674 Node* crc = argument(0); // type: int
6675 Node* src = argument(1); // type: oop
6676 Node* offset = argument(2); // type: int
6677 Node* length = argument(3); // type: int
6678
6679 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
6680 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
6681 // failed array check
6682 return false;
6683 }
6684
6685 // Figure out the size and type of the elements we will be copying.
6686 BasicType src_elem = src_type->elem()->array_element_basic_type();
6687 if (src_elem != T_BYTE) {
6688 return false;
6689 }
6690
6691 // 'src_start' points to src array + scaled offset
6692 src = must_be_not_null(src, true);
6693 Node* src_start = array_element_address(src, offset, src_elem);
6694
6695 // We assume that range check is done by caller.
6696 // TODO: generate range check (offset+length < src.length) in debug VM.
6697
6698 // Call the stub.
6699 address stubAddr = StubRoutines::updateBytesCRC32();
6700 const char *stubName = "updateBytesCRC32";
6701
6702 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
6703 stubAddr, stubName, TypePtr::BOTTOM,
6704 crc, src_start, length);
6705 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6706 set_result(result);
6707 return true;
6708 }
6709
6710 /**
6711 * Calculate CRC32 for ByteBuffer.
6712 * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
6713 */
6714 bool LibraryCallKit::inline_updateByteBufferCRC32() {
6715 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
6716 assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
6717 // no receiver since it is static method
6718 Node* crc = argument(0); // type: int
6719 Node* src = argument(1); // type: long
6720 Node* offset = argument(3); // type: int
6721 Node* length = argument(4); // type: int
6722
6723 src = ConvL2X(src); // adjust Java long to machine word
6724 Node* base = _gvn.transform(new CastX2PNode(src));
6725 offset = ConvI2X(offset);
6726
6727 // 'src_start' points to src array + scaled offset
6728 Node* src_start = off_heap_plus_addr(base, offset);
6729
6730 // Call the stub.
6731 address stubAddr = StubRoutines::updateBytesCRC32();
6732 const char *stubName = "updateBytesCRC32";
6733
6734 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
6735 stubAddr, stubName, TypePtr::BOTTOM,
6736 crc, src_start, length);
6737 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6738 set_result(result);
6739 return true;
6740 }
6741
6742 //------------------------------get_table_from_crc32c_class-----------------------
6743 Node * LibraryCallKit::get_table_from_crc32c_class(ciInstanceKlass *crc32c_class) {
6744 Node* table = load_field_from_object(nullptr, "byteTable", "[I", /*decorators*/ IN_HEAP, /*is_static*/ true, crc32c_class);
6745 assert (table != nullptr, "wrong version of java.util.zip.CRC32C");
6746
6747 return table;
6748 }
6749
6750 //------------------------------inline_updateBytesCRC32C-----------------------
6751 //
6752 // Calculate CRC32C for byte[] array.
6753 // int java.util.zip.CRC32C.updateBytes(int crc, byte[] buf, int off, int end)
6754 //
6755 bool LibraryCallKit::inline_updateBytesCRC32C() {
6756 assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
6757 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
6758 assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
6759 // no receiver since it is a static method
6760 Node* crc = argument(0); // type: int
6761 Node* src = argument(1); // type: oop
6762 Node* offset = argument(2); // type: int
6763 Node* end = argument(3); // type: int
6764
6765 Node* length = _gvn.transform(new SubINode(end, offset));
6766
6767 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
6768 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
6769 // failed array check
6770 return false;
6771 }
6772
6773 // Figure out the size and type of the elements we will be copying.
6774 BasicType src_elem = src_type->elem()->array_element_basic_type();
6775 if (src_elem != T_BYTE) {
6776 return false;
6777 }
6778
6779 // 'src_start' points to src array + scaled offset
6780 src = must_be_not_null(src, true);
6781 Node* src_start = array_element_address(src, offset, src_elem);
6782
6783 // static final int[] byteTable in class CRC32C
6784 Node* table = get_table_from_crc32c_class(callee()->holder());
6785 table = must_be_not_null(table, true);
6786 Node* table_start = array_element_address(table, intcon(0), T_INT);
6787
6788 // We assume that range check is done by caller.
6789 // TODO: generate range check (offset+length < src.length) in debug VM.
6790
6791 // Call the stub.
6792 address stubAddr = StubRoutines::updateBytesCRC32C();
6793 const char *stubName = "updateBytesCRC32C";
6794
6795 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
6796 stubAddr, stubName, TypePtr::BOTTOM,
6797 crc, src_start, length, table_start);
6798 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6799 set_result(result);
6800 return true;
6801 }
6802
6803 //------------------------------inline_updateDirectByteBufferCRC32C-----------------------
6804 //
6805 // Calculate CRC32C for DirectByteBuffer.
6806 // int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
6807 //
6808 bool LibraryCallKit::inline_updateDirectByteBufferCRC32C() {
6809 assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
6810 assert(callee()->signature()->size() == 5, "updateDirectByteBuffer has 4 parameters and one is long");
6811 assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
6812 // no receiver since it is a static method
6813 Node* crc = argument(0); // type: int
6814 Node* src = argument(1); // type: long
6815 Node* offset = argument(3); // type: int
6816 Node* end = argument(4); // type: int
6817
6818 Node* length = _gvn.transform(new SubINode(end, offset));
6819
6820 src = ConvL2X(src); // adjust Java long to machine word
6821 Node* base = _gvn.transform(new CastX2PNode(src));
6822 offset = ConvI2X(offset);
6823
6824 // 'src_start' points to src array + scaled offset
6825 Node* src_start = off_heap_plus_addr(base, offset);
6826
6827 // static final int[] byteTable in class CRC32C
6828 Node* table = get_table_from_crc32c_class(callee()->holder());
6829 table = must_be_not_null(table, true);
6830 Node* table_start = array_element_address(table, intcon(0), T_INT);
6831
6832 // Call the stub.
6833 address stubAddr = StubRoutines::updateBytesCRC32C();
6834 const char *stubName = "updateBytesCRC32C";
6835
6836 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
6837 stubAddr, stubName, TypePtr::BOTTOM,
6838 crc, src_start, length, table_start);
6839 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6840 set_result(result);
6841 return true;
6842 }
6843
6844 //------------------------------inline_updateBytesAdler32----------------------
6845 //
6846 // Calculate Adler32 checksum for byte[] array.
6847 // int java.util.zip.Adler32.updateBytes(int crc, byte[] buf, int off, int len)
6848 //
6849 bool LibraryCallKit::inline_updateBytesAdler32() {
6850 assert(UseAdler32Intrinsics, "Adler32 Intrinsic support need"); // check if we actually need to check this flag or check a different one
6851 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
6852 assert(callee()->holder()->is_loaded(), "Adler32 class must be loaded");
6853 // no receiver since it is static method
6854 Node* crc = argument(0); // type: int
6855 Node* src = argument(1); // type: oop
6856 Node* offset = argument(2); // type: int
6857 Node* length = argument(3); // type: int
6858
6859 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
6860 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
6861 // failed array check
6862 return false;
6863 }
6864
6865 // Figure out the size and type of the elements we will be copying.
6866 BasicType src_elem = src_type->elem()->array_element_basic_type();
6867 if (src_elem != T_BYTE) {
6868 return false;
6869 }
6870
6871 // 'src_start' points to src array + scaled offset
6872 Node* src_start = array_element_address(src, offset, src_elem);
6873
6874 // We assume that range check is done by caller.
6875 // TODO: generate range check (offset+length < src.length) in debug VM.
6876
6877 // Call the stub.
6878 address stubAddr = StubRoutines::updateBytesAdler32();
6879 const char *stubName = "updateBytesAdler32";
6880
6881 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
6882 stubAddr, stubName, TypePtr::BOTTOM,
6883 crc, src_start, length);
6884 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6885 set_result(result);
6886 return true;
6887 }
6888
6889 //------------------------------inline_updateByteBufferAdler32---------------
6890 //
6891 // Calculate Adler32 checksum for DirectByteBuffer.
6892 // int java.util.zip.Adler32.updateByteBuffer(int crc, long buf, int off, int len)
6893 //
6894 bool LibraryCallKit::inline_updateByteBufferAdler32() {
6895 assert(UseAdler32Intrinsics, "Adler32 Intrinsic support need"); // check if we actually need to check this flag or check a different one
6896 assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
6897 assert(callee()->holder()->is_loaded(), "Adler32 class must be loaded");
6898 // no receiver since it is static method
6899 Node* crc = argument(0); // type: int
6900 Node* src = argument(1); // type: long
6901 Node* offset = argument(3); // type: int
6902 Node* length = argument(4); // type: int
6903
6904 src = ConvL2X(src); // adjust Java long to machine word
6905 Node* base = _gvn.transform(new CastX2PNode(src));
6906 offset = ConvI2X(offset);
6907
6908 // 'src_start' points to src array + scaled offset
6909 Node* src_start = off_heap_plus_addr(base, offset);
6910
6911 // Call the stub.
6912 address stubAddr = StubRoutines::updateBytesAdler32();
6913 const char *stubName = "updateBytesAdler32";
6914
6915 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
6916 stubAddr, stubName, TypePtr::BOTTOM,
6917 crc, src_start, length);
6918
6919 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6920 set_result(result);
6921 return true;
6922 }
6923
6924 //----------------------------inline_reference_get0----------------------------
6925 // public T java.lang.ref.Reference.get();
6926 bool LibraryCallKit::inline_reference_get0() {
6927 const int referent_offset = java_lang_ref_Reference::referent_offset();
6928
6929 // Get the argument:
6930 Node* reference_obj = null_check_receiver();
6931 if (stopped()) return true;
6932
6933 DecoratorSet decorators = IN_HEAP | ON_WEAK_OOP_REF;
6934 Node* result = load_field_from_object(reference_obj, "referent", "Ljava/lang/Object;",
6935 decorators, /*is_static*/ false, nullptr);
6936 if (result == nullptr) return false;
6937
6938 // Add memory barrier to prevent commoning reads from this field
6939 // across safepoint since GC can change its value.
6940 insert_mem_bar(Op_MemBarCPUOrder);
6941
6942 set_result(result);
6943 return true;
6944 }
6945
6946 //----------------------------inline_reference_refersTo0----------------------------
6947 // bool java.lang.ref.Reference.refersTo0();
6948 // bool java.lang.ref.PhantomReference.refersTo0();
6949 bool LibraryCallKit::inline_reference_refersTo0(bool is_phantom) {
6950 // Get arguments:
6951 Node* reference_obj = null_check_receiver();
6952 Node* other_obj = argument(1);
6953 if (stopped()) return true;
6954
6955 DecoratorSet decorators = IN_HEAP | AS_NO_KEEPALIVE;
6956 decorators |= (is_phantom ? ON_PHANTOM_OOP_REF : ON_WEAK_OOP_REF);
6957 Node* referent = load_field_from_object(reference_obj, "referent", "Ljava/lang/Object;",
6958 decorators, /*is_static*/ false, nullptr);
6959 if (referent == nullptr) return false;
6960
6961 // Add memory barrier to prevent commoning reads from this field
6962 // across safepoint since GC can change its value.
6963 insert_mem_bar(Op_MemBarCPUOrder);
6964
6965 Node* cmp = _gvn.transform(new CmpPNode(referent, other_obj));
6966 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
6967 IfNode* if_node = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
6968
6969 RegionNode* region = new RegionNode(3);
6970 PhiNode* phi = new PhiNode(region, TypeInt::BOOL);
6971
6972 Node* if_true = _gvn.transform(new IfTrueNode(if_node));
6973 region->init_req(1, if_true);
6974 phi->init_req(1, intcon(1));
6975
6976 Node* if_false = _gvn.transform(new IfFalseNode(if_node));
6977 region->init_req(2, if_false);
6978 phi->init_req(2, intcon(0));
6979
6980 set_control(_gvn.transform(region));
6981 record_for_igvn(region);
6982 set_result(_gvn.transform(phi));
6983 return true;
6984 }
6985
6986 //----------------------------inline_reference_clear0----------------------------
6987 // void java.lang.ref.Reference.clear0();
6988 // void java.lang.ref.PhantomReference.clear0();
6989 bool LibraryCallKit::inline_reference_clear0(bool is_phantom) {
6990 // This matches the implementation in JVM_ReferenceClear, see the comments there.
6991
6992 // Get arguments
6993 Node* reference_obj = null_check_receiver();
6994 if (stopped()) return true;
6995
6996 // Common access parameters
6997 DecoratorSet decorators = IN_HEAP | AS_NO_KEEPALIVE;
6998 decorators |= (is_phantom ? ON_PHANTOM_OOP_REF : ON_WEAK_OOP_REF);
6999 Node* referent_field_addr = basic_plus_adr(reference_obj, java_lang_ref_Reference::referent_offset());
7000 const TypePtr* referent_field_addr_type = _gvn.type(referent_field_addr)->isa_ptr();
7001 const Type* val_type = TypeOopPtr::make_from_klass(env()->Object_klass());
7002
7003 Node* referent = access_load_at(reference_obj,
7004 referent_field_addr,
7005 referent_field_addr_type,
7006 val_type,
7007 T_OBJECT,
7008 decorators);
7009
7010 IdealKit ideal(this);
7011 #define __ ideal.
7012 __ if_then(referent, BoolTest::ne, null());
7013 sync_kit(ideal);
7014 access_store_at(reference_obj,
7015 referent_field_addr,
7016 referent_field_addr_type,
7017 null(),
7018 val_type,
7019 T_OBJECT,
7020 decorators);
7021 __ sync_kit(this);
7022 __ end_if();
7023 final_sync(ideal);
7024 #undef __
7025
7026 return true;
7027 }
7028
7029 //-----------------------inline_reference_reachabilityFence-----------------
7030 // bool java.lang.ref.Reference.reachabilityFence();
7031 bool LibraryCallKit::inline_reference_reachabilityFence() {
7032 Node* referent = argument(0);
7033 insert_reachability_fence(referent);
7034 return true;
7035 }
7036
7037 Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString,
7038 DecoratorSet decorators, bool is_static,
7039 ciInstanceKlass* fromKls) {
7040 if (fromKls == nullptr) {
7041 const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
7042 assert(tinst != nullptr, "obj is null");
7043 assert(tinst->is_loaded(), "obj is not loaded");
7044 fromKls = tinst->instance_klass();
7045 } else {
7046 assert(is_static, "only for static field access");
7047 }
7048 ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
7049 ciSymbol::make(fieldTypeString),
7050 is_static);
7051
7052 assert(field != nullptr, "undefined field %s %s %s", fieldTypeString, fromKls->name()->as_utf8(), fieldName);
7053 if (field == nullptr) return (Node *) nullptr;
7054
7055 if (is_static) {
7056 const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
7057 fromObj = makecon(tip);
7058 }
7059
7060 // Next code copied from Parse::do_get_xxx():
7061
7062 // Compute address and memory type.
7063 int offset = field->offset_in_bytes();
7064 bool is_vol = field->is_volatile();
7065 ciType* field_klass = field->type();
7066 assert(field_klass->is_loaded(), "should be loaded");
7067 const TypePtr* adr_type = C->alias_type(field)->adr_type();
7068 Node *adr = basic_plus_adr(fromObj, fromObj, offset);
7069 assert(C->get_alias_index(adr_type) == C->get_alias_index(_gvn.type(adr)->isa_ptr()),
7070 "slice of address and input slice don't match");
7071 BasicType bt = field->layout_type();
7072
7073 // Build the resultant type of the load
7074 const Type *type;
7075 if (bt == T_OBJECT) {
7076 type = TypeOopPtr::make_from_klass(field_klass->as_klass());
7077 } else {
7078 type = Type::get_const_basic_type(bt);
7079 }
7080
7081 if (is_vol) {
7082 decorators |= MO_SEQ_CST;
7083 }
7084
7085 return access_load_at(fromObj, adr, adr_type, type, bt, decorators);
7086 }
7087
7088 Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
7089 bool is_exact /* true */, bool is_static /* false */,
7090 ciInstanceKlass * fromKls /* nullptr */) {
7091 if (fromKls == nullptr) {
7092 const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
7093 assert(tinst != nullptr, "obj is null");
7094 assert(tinst->is_loaded(), "obj is not loaded");
7095 assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
7096 fromKls = tinst->instance_klass();
7097 }
7098 else {
7099 assert(is_static, "only for static field access");
7100 }
7101 ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
7102 ciSymbol::make(fieldTypeString),
7103 is_static);
7104
7105 assert(field != nullptr, "undefined field");
7106 assert(!field->is_volatile(), "not defined for volatile fields");
7107
7108 if (is_static) {
7109 const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
7110 fromObj = makecon(tip);
7111 }
7112
7113 // Next code copied from Parse::do_get_xxx():
7114
7115 // Compute address and memory type.
7116 int offset = field->offset_in_bytes();
7117 Node *adr = basic_plus_adr(fromObj, fromObj, offset);
7118
7119 return adr;
7120 }
7121
7122 //------------------------------inline_aescrypt_Block-----------------------
7123 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
7124 address stubAddr = nullptr;
7125 const char *stubName;
7126 bool is_decrypt = false;
7127 assert(UseAES, "need AES instruction support");
7128
7129 switch(id) {
7130 case vmIntrinsics::_aescrypt_encryptBlock:
7131 stubAddr = StubRoutines::aescrypt_encryptBlock();
7132 stubName = "aescrypt_encryptBlock";
7133 break;
7134 case vmIntrinsics::_aescrypt_decryptBlock:
7135 stubAddr = StubRoutines::aescrypt_decryptBlock();
7136 stubName = "aescrypt_decryptBlock";
7137 is_decrypt = true;
7138 break;
7139 default:
7140 break;
7141 }
7142 if (stubAddr == nullptr) return false;
7143
7144 Node* aescrypt_object = argument(0);
7145 Node* src = argument(1);
7146 Node* src_offset = argument(2);
7147 Node* dest = argument(3);
7148 Node* dest_offset = argument(4);
7149
7150 src = must_be_not_null(src, true);
7151 dest = must_be_not_null(dest, true);
7152
7153 // (1) src and dest are arrays.
7154 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
7155 const TypeAryPtr* dest_type = dest->Value(&_gvn)->isa_aryptr();
7156 assert( src_type != nullptr && src_type->elem() != Type::BOTTOM &&
7157 dest_type != nullptr && dest_type->elem() != Type::BOTTOM, "args are strange");
7158
7159 // for the quick and dirty code we will skip all the checks.
7160 // we are just trying to get the call to be generated.
7161 Node* src_start = src;
7162 Node* dest_start = dest;
7163 if (src_offset != nullptr || dest_offset != nullptr) {
7164 assert(src_offset != nullptr && dest_offset != nullptr, "");
7165 src_start = array_element_address(src, src_offset, T_BYTE);
7166 dest_start = array_element_address(dest, dest_offset, T_BYTE);
7167 }
7168
7169 // now need to get the start of its expanded key array
7170 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
7171 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, is_decrypt);
7172 if (k_start == nullptr) return false;
7173
7174 // Call the stub.
7175 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
7176 stubAddr, stubName, TypePtr::BOTTOM,
7177 src_start, dest_start, k_start);
7178
7179 return true;
7180 }
7181
7182 //------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
7183 bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
7184 address stubAddr = nullptr;
7185 const char *stubName = nullptr;
7186 bool is_decrypt = false;
7187 assert(UseAES, "need AES instruction support");
7188
7189 switch(id) {
7190 case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
7191 stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
7192 stubName = "cipherBlockChaining_encryptAESCrypt";
7193 break;
7194 case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
7195 stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
7196 stubName = "cipherBlockChaining_decryptAESCrypt";
7197 is_decrypt = true;
7198 break;
7199 default:
7200 break;
7201 }
7202 if (stubAddr == nullptr) return false;
7203
7204 Node* cipherBlockChaining_object = argument(0);
7205 Node* src = argument(1);
7206 Node* src_offset = argument(2);
7207 Node* len = argument(3);
7208 Node* dest = argument(4);
7209 Node* dest_offset = argument(5);
7210
7211 src = must_be_not_null(src, false);
7212 dest = must_be_not_null(dest, false);
7213
7214 // (1) src and dest are arrays.
7215 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
7216 const TypeAryPtr* dest_type = dest->Value(&_gvn)->isa_aryptr();
7217 assert( src_type != nullptr && src_type->elem() != Type::BOTTOM &&
7218 dest_type != nullptr && dest_type->elem() != Type::BOTTOM, "args are strange");
7219
7220 // checks are the responsibility of the caller
7221 Node* src_start = src;
7222 Node* dest_start = dest;
7223 if (src_offset != nullptr || dest_offset != nullptr) {
7224 assert(src_offset != nullptr && dest_offset != nullptr, "");
7225 src_start = array_element_address(src, src_offset, T_BYTE);
7226 dest_start = array_element_address(dest, dest_offset, T_BYTE);
7227 }
7228
7229 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
7230 // (because of the predicated logic executed earlier).
7231 // so we cast it here safely.
7232 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
7233
7234 Node* embeddedCipherObj = load_field_from_object(cipherBlockChaining_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
7235 if (embeddedCipherObj == nullptr) return false;
7236
7237 // cast it to what we know it will be at runtime
7238 const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
7239 assert(tinst != nullptr, "CBC obj is null");
7240 assert(tinst->is_loaded(), "CBC obj is not loaded");
7241 ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
7242 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
7243
7244 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
7245 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
7246 const TypeOopPtr* xtype = aklass->as_instance_type()->cast_to_ptr_type(TypePtr::NotNull);
7247 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
7248 aescrypt_object = _gvn.transform(aescrypt_object);
7249
7250 // we need to get the start of the aescrypt_object's expanded key array
7251 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, is_decrypt);
7252 if (k_start == nullptr) return false;
7253
7254 // similarly, get the start address of the r vector
7255 Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B");
7256 if (objRvec == nullptr) return false;
7257 Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
7258
7259 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
7260 Node* cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
7261 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
7262 stubAddr, stubName, TypePtr::BOTTOM,
7263 src_start, dest_start, k_start, r_start, len);
7264
7265 // return cipher length (int)
7266 Node* retvalue = _gvn.transform(new ProjNode(cbcCrypt, TypeFunc::Parms));
7267 set_result(retvalue);
7268 return true;
7269 }
7270
7271 //------------------------------inline_electronicCodeBook_AESCrypt-----------------------
7272 bool LibraryCallKit::inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id) {
7273 address stubAddr = nullptr;
7274 const char *stubName = nullptr;
7275 bool is_decrypt = false;
7276 assert(UseAES, "need AES instruction support");
7277
7278 switch (id) {
7279 case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
7280 stubAddr = StubRoutines::electronicCodeBook_encryptAESCrypt();
7281 stubName = "electronicCodeBook_encryptAESCrypt";
7282 break;
7283 case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
7284 stubAddr = StubRoutines::electronicCodeBook_decryptAESCrypt();
7285 stubName = "electronicCodeBook_decryptAESCrypt";
7286 is_decrypt = true;
7287 break;
7288 default:
7289 break;
7290 }
7291
7292 if (stubAddr == nullptr) return false;
7293
7294 Node* electronicCodeBook_object = argument(0);
7295 Node* src = argument(1);
7296 Node* src_offset = argument(2);
7297 Node* len = argument(3);
7298 Node* dest = argument(4);
7299 Node* dest_offset = argument(5);
7300
7301 // (1) src and dest are arrays.
7302 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
7303 const TypeAryPtr* dest_type = dest->Value(&_gvn)->isa_aryptr();
7304 assert( src_type != nullptr && src_type->elem() != Type::BOTTOM &&
7305 dest_type != nullptr && dest_type->elem() != Type::BOTTOM, "args are strange");
7306
7307 // checks are the responsibility of the caller
7308 Node* src_start = src;
7309 Node* dest_start = dest;
7310 if (src_offset != nullptr || dest_offset != nullptr) {
7311 assert(src_offset != nullptr && dest_offset != nullptr, "");
7312 src_start = array_element_address(src, src_offset, T_BYTE);
7313 dest_start = array_element_address(dest, dest_offset, T_BYTE);
7314 }
7315
7316 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
7317 // (because of the predicated logic executed earlier).
7318 // so we cast it here safely.
7319 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
7320
7321 Node* embeddedCipherObj = load_field_from_object(electronicCodeBook_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
7322 if (embeddedCipherObj == nullptr) return false;
7323
7324 // cast it to what we know it will be at runtime
7325 const TypeInstPtr* tinst = _gvn.type(electronicCodeBook_object)->isa_instptr();
7326 assert(tinst != nullptr, "ECB obj is null");
7327 assert(tinst->is_loaded(), "ECB obj is not loaded");
7328 ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
7329 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
7330
7331 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
7332 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
7333 const TypeOopPtr* xtype = aklass->as_instance_type()->cast_to_ptr_type(TypePtr::NotNull);
7334 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
7335 aescrypt_object = _gvn.transform(aescrypt_object);
7336
7337 // we need to get the start of the aescrypt_object's expanded key array
7338 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, is_decrypt);
7339 if (k_start == nullptr) return false;
7340
7341 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
7342 Node* ecbCrypt = make_runtime_call(RC_LEAF | RC_NO_FP,
7343 OptoRuntime::electronicCodeBook_aescrypt_Type(),
7344 stubAddr, stubName, TypePtr::BOTTOM,
7345 src_start, dest_start, k_start, len);
7346
7347 // return cipher length (int)
7348 Node* retvalue = _gvn.transform(new ProjNode(ecbCrypt, TypeFunc::Parms));
7349 set_result(retvalue);
7350 return true;
7351 }
7352
7353 //------------------------------inline_counterMode_AESCrypt-----------------------
7354 bool LibraryCallKit::inline_counterMode_AESCrypt(vmIntrinsics::ID id) {
7355 assert(UseAES, "need AES instruction support");
7356 if (!UseAESCTRIntrinsics) return false;
7357
7358 address stubAddr = nullptr;
7359 const char *stubName = nullptr;
7360 if (id == vmIntrinsics::_counterMode_AESCrypt) {
7361 stubAddr = StubRoutines::counterMode_AESCrypt();
7362 stubName = "counterMode_AESCrypt";
7363 }
7364 if (stubAddr == nullptr) return false;
7365
7366 Node* counterMode_object = argument(0);
7367 Node* src = argument(1);
7368 Node* src_offset = argument(2);
7369 Node* len = argument(3);
7370 Node* dest = argument(4);
7371 Node* dest_offset = argument(5);
7372
7373 // (1) src and dest are arrays.
7374 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
7375 const TypeAryPtr* dest_type = dest->Value(&_gvn)->isa_aryptr();
7376 assert( src_type != nullptr && src_type->elem() != Type::BOTTOM &&
7377 dest_type != nullptr && dest_type->elem() != Type::BOTTOM, "args are strange");
7378
7379 // checks are the responsibility of the caller
7380 Node* src_start = src;
7381 Node* dest_start = dest;
7382 if (src_offset != nullptr || dest_offset != nullptr) {
7383 assert(src_offset != nullptr && dest_offset != nullptr, "");
7384 src_start = array_element_address(src, src_offset, T_BYTE);
7385 dest_start = array_element_address(dest, dest_offset, T_BYTE);
7386 }
7387
7388 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
7389 // (because of the predicated logic executed earlier).
7390 // so we cast it here safely.
7391 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
7392 Node* embeddedCipherObj = load_field_from_object(counterMode_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
7393 if (embeddedCipherObj == nullptr) return false;
7394 // cast it to what we know it will be at runtime
7395 const TypeInstPtr* tinst = _gvn.type(counterMode_object)->isa_instptr();
7396 assert(tinst != nullptr, "CTR obj is null");
7397 assert(tinst->is_loaded(), "CTR obj is not loaded");
7398 ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
7399 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
7400 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
7401 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
7402 const TypeOopPtr* xtype = aklass->as_instance_type()->cast_to_ptr_type(TypePtr::NotNull);
7403 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
7404 aescrypt_object = _gvn.transform(aescrypt_object);
7405 // we need to get the start of the aescrypt_object's expanded key array
7406 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, /* is_decrypt */ false);
7407 if (k_start == nullptr) return false;
7408 // similarly, get the start address of the r vector
7409 Node* obj_counter = load_field_from_object(counterMode_object, "counter", "[B");
7410 if (obj_counter == nullptr) return false;
7411 Node* cnt_start = array_element_address(obj_counter, intcon(0), T_BYTE);
7412
7413 Node* saved_encCounter = load_field_from_object(counterMode_object, "encryptedCounter", "[B");
7414 if (saved_encCounter == nullptr) return false;
7415 Node* saved_encCounter_start = array_element_address(saved_encCounter, intcon(0), T_BYTE);
7416 Node* used = field_address_from_object(counterMode_object, "used", "I", /*is_exact*/ false);
7417
7418 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
7419 Node* ctrCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
7420 OptoRuntime::counterMode_aescrypt_Type(),
7421 stubAddr, stubName, TypePtr::BOTTOM,
7422 src_start, dest_start, k_start, cnt_start, len, saved_encCounter_start, used);
7423
7424 // return cipher length (int)
7425 Node* retvalue = _gvn.transform(new ProjNode(ctrCrypt, TypeFunc::Parms));
7426 set_result(retvalue);
7427 return true;
7428 }
7429
7430 //------------------------------get_key_start_from_aescrypt_object-----------------------
7431 Node* LibraryCallKit::get_key_start_from_aescrypt_object(Node* aescrypt_object, bool is_decrypt) {
7432 // MixColumns for decryption can be reduced by preprocessing MixColumns with round keys.
7433 // Intel's extension is based on this optimization and AESCrypt generates round keys by preprocessing MixColumns.
7434 // However, ppc64 vncipher processes MixColumns and requires the same round keys with encryption.
7435 // The following platform specific stubs of encryption and decryption use the same round keys.
7436 #if defined(PPC64) || defined(S390) || defined(RISCV64)
7437 bool use_decryption_key = false;
7438 #else
7439 bool use_decryption_key = is_decrypt;
7440 #endif
7441 Node* objAESCryptKey = load_field_from_object(aescrypt_object, use_decryption_key ? "sessionKd" : "sessionKe", "[I");
7442 assert(objAESCryptKey != nullptr, "wrong version of com.sun.crypto.provider.AES_Crypt");
7443 if (objAESCryptKey == nullptr) return (Node *) nullptr;
7444
7445 // now have the array, need to get the start address of the selected key array
7446 Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
7447 return k_start;
7448 }
7449
7450 //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
7451 // Return node representing slow path of predicate check.
7452 // the pseudo code we want to emulate with this predicate is:
7453 // for encryption:
7454 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
7455 // for decryption:
7456 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
7457 // note cipher==plain is more conservative than the original java code but that's OK
7458 //
7459 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
7460 // The receiver was checked for null already.
7461 Node* objCBC = argument(0);
7462
7463 Node* src = argument(1);
7464 Node* dest = argument(4);
7465
7466 // Load embeddedCipher field of CipherBlockChaining object.
7467 Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
7468
7469 // get AESCrypt klass for instanceOf check
7470 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
7471 // will have same classloader as CipherBlockChaining object
7472 const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr();
7473 assert(tinst != nullptr, "CBCobj is null");
7474 assert(tinst->is_loaded(), "CBCobj is not loaded");
7475
7476 // we want to do an instanceof comparison against the AESCrypt class
7477 ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
7478 if (!klass_AESCrypt->is_loaded()) {
7479 // if AESCrypt is not even loaded, we never take the intrinsic fast path
7480 Node* ctrl = control();
7481 set_control(top()); // no regular fast path
7482 return ctrl;
7483 }
7484
7485 src = must_be_not_null(src, true);
7486 dest = must_be_not_null(dest, true);
7487
7488 // Resolve oops to stable for CmpP below.
7489 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
7490
7491 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
7492 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
7493 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
7494
7495 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
7496
7497 // for encryption, we are done
7498 if (!decrypting)
7499 return instof_false; // even if it is null
7500
7501 // for decryption, we need to add a further check to avoid
7502 // taking the intrinsic path when cipher and plain are the same
7503 // see the original java code for why.
7504 RegionNode* region = new RegionNode(3);
7505 region->init_req(1, instof_false);
7506
7507 Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));
7508 Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq));
7509 Node* src_dest_conjoint = generate_guard(bool_src_dest, nullptr, PROB_MIN);
7510 region->init_req(2, src_dest_conjoint);
7511
7512 record_for_igvn(region);
7513 return _gvn.transform(region);
7514 }
7515
7516 //----------------------------inline_electronicCodeBook_AESCrypt_predicate----------------------------
7517 // Return node representing slow path of predicate check.
7518 // the pseudo code we want to emulate with this predicate is:
7519 // for encryption:
7520 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
7521 // for decryption:
7522 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
7523 // note cipher==plain is more conservative than the original java code but that's OK
7524 //
7525 Node* LibraryCallKit::inline_electronicCodeBook_AESCrypt_predicate(bool decrypting) {
7526 // The receiver was checked for null already.
7527 Node* objECB = argument(0);
7528
7529 // Load embeddedCipher field of ElectronicCodeBook object.
7530 Node* embeddedCipherObj = load_field_from_object(objECB, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
7531
7532 // get AESCrypt klass for instanceOf check
7533 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
7534 // will have same classloader as ElectronicCodeBook object
7535 const TypeInstPtr* tinst = _gvn.type(objECB)->isa_instptr();
7536 assert(tinst != nullptr, "ECBobj is null");
7537 assert(tinst->is_loaded(), "ECBobj is not loaded");
7538
7539 // we want to do an instanceof comparison against the AESCrypt class
7540 ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
7541 if (!klass_AESCrypt->is_loaded()) {
7542 // if AESCrypt is not even loaded, we never take the intrinsic fast path
7543 Node* ctrl = control();
7544 set_control(top()); // no regular fast path
7545 return ctrl;
7546 }
7547 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
7548
7549 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
7550 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
7551 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
7552
7553 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
7554
7555 // for encryption, we are done
7556 if (!decrypting)
7557 return instof_false; // even if it is null
7558
7559 // for decryption, we need to add a further check to avoid
7560 // taking the intrinsic path when cipher and plain are the same
7561 // see the original java code for why.
7562 RegionNode* region = new RegionNode(3);
7563 region->init_req(1, instof_false);
7564 Node* src = argument(1);
7565 Node* dest = argument(4);
7566 Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));
7567 Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq));
7568 Node* src_dest_conjoint = generate_guard(bool_src_dest, nullptr, PROB_MIN);
7569 region->init_req(2, src_dest_conjoint);
7570
7571 record_for_igvn(region);
7572 return _gvn.transform(region);
7573 }
7574
7575 //----------------------------inline_counterMode_AESCrypt_predicate----------------------------
7576 // Return node representing slow path of predicate check.
7577 // the pseudo code we want to emulate with this predicate is:
7578 // for encryption:
7579 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
7580 // for decryption:
7581 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
7582 // note cipher==plain is more conservative than the original java code but that's OK
7583 //
7584
7585 Node* LibraryCallKit::inline_counterMode_AESCrypt_predicate() {
7586 // The receiver was checked for null already.
7587 Node* objCTR = argument(0);
7588
7589 // Load embeddedCipher field of CipherBlockChaining object.
7590 Node* embeddedCipherObj = load_field_from_object(objCTR, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
7591
7592 // get AESCrypt klass for instanceOf check
7593 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
7594 // will have same classloader as CipherBlockChaining object
7595 const TypeInstPtr* tinst = _gvn.type(objCTR)->isa_instptr();
7596 assert(tinst != nullptr, "CTRobj is null");
7597 assert(tinst->is_loaded(), "CTRobj is not loaded");
7598
7599 // we want to do an instanceof comparison against the AESCrypt class
7600 ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
7601 if (!klass_AESCrypt->is_loaded()) {
7602 // if AESCrypt is not even loaded, we never take the intrinsic fast path
7603 Node* ctrl = control();
7604 set_control(top()); // no regular fast path
7605 return ctrl;
7606 }
7607
7608 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
7609 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
7610 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
7611 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
7612 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
7613
7614 return instof_false; // even if it is null
7615 }
7616
7617 //------------------------------inline_ghash_processBlocks
7618 bool LibraryCallKit::inline_ghash_processBlocks() {
7619 address stubAddr;
7620 const char *stubName;
7621 assert(UseGHASHIntrinsics, "need GHASH intrinsics support");
7622
7623 stubAddr = StubRoutines::ghash_processBlocks();
7624 stubName = "ghash_processBlocks";
7625
7626 Node* data = argument(0);
7627 Node* offset = argument(1);
7628 Node* len = argument(2);
7629 Node* state = argument(3);
7630 Node* subkeyH = argument(4);
7631
7632 state = must_be_not_null(state, true);
7633 subkeyH = must_be_not_null(subkeyH, true);
7634 data = must_be_not_null(data, true);
7635
7636 Node* state_start = array_element_address(state, intcon(0), T_LONG);
7637 assert(state_start, "state is null");
7638 Node* subkeyH_start = array_element_address(subkeyH, intcon(0), T_LONG);
7639 assert(subkeyH_start, "subkeyH is null");
7640 Node* data_start = array_element_address(data, offset, T_BYTE);
7641 assert(data_start, "data is null");
7642
7643 Node* ghash = make_runtime_call(RC_LEAF|RC_NO_FP,
7644 OptoRuntime::ghash_processBlocks_Type(),
7645 stubAddr, stubName, TypePtr::BOTTOM,
7646 state_start, subkeyH_start, data_start, len);
7647 return true;
7648 }
7649
7650 //------------------------------inline_chacha20Block
7651 bool LibraryCallKit::inline_chacha20Block() {
7652 address stubAddr;
7653 const char *stubName;
7654 assert(UseChaCha20Intrinsics, "need ChaCha20 intrinsics support");
7655
7656 stubAddr = StubRoutines::chacha20Block();
7657 stubName = "chacha20Block";
7658
7659 Node* state = argument(0);
7660 Node* result = argument(1);
7661
7662 state = must_be_not_null(state, true);
7663 result = must_be_not_null(result, true);
7664
7665 Node* state_start = array_element_address(state, intcon(0), T_INT);
7666 assert(state_start, "state is null");
7667 Node* result_start = array_element_address(result, intcon(0), T_BYTE);
7668 assert(result_start, "result is null");
7669
7670 Node* cc20Blk = make_runtime_call(RC_LEAF|RC_NO_FP,
7671 OptoRuntime::chacha20Block_Type(),
7672 stubAddr, stubName, TypePtr::BOTTOM,
7673 state_start, result_start);
7674 // return key stream length (int)
7675 Node* retvalue = _gvn.transform(new ProjNode(cc20Blk, TypeFunc::Parms));
7676 set_result(retvalue);
7677 return true;
7678 }
7679
7680 //------------------------------inline_kyberNtt
7681 bool LibraryCallKit::inline_kyberNtt() {
7682 address stubAddr;
7683 const char *stubName;
7684 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
7685 assert(callee()->signature()->size() == 2, "kyberNtt has 2 parameters");
7686
7687 stubAddr = StubRoutines::kyberNtt();
7688 stubName = "kyberNtt";
7689 if (!stubAddr) return false;
7690
7691 Node* coeffs = argument(0);
7692 Node* ntt_zetas = argument(1);
7693
7694 coeffs = must_be_not_null(coeffs, true);
7695 ntt_zetas = must_be_not_null(ntt_zetas, true);
7696
7697 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_SHORT);
7698 assert(coeffs_start, "coeffs is null");
7699 Node* ntt_zetas_start = array_element_address(ntt_zetas, intcon(0), T_SHORT);
7700 assert(ntt_zetas_start, "ntt_zetas is null");
7701 Node* kyberNtt = make_runtime_call(RC_LEAF|RC_NO_FP,
7702 OptoRuntime::kyberNtt_Type(),
7703 stubAddr, stubName, TypePtr::BOTTOM,
7704 coeffs_start, ntt_zetas_start);
7705 // return an int
7706 Node* retvalue = _gvn.transform(new ProjNode(kyberNtt, TypeFunc::Parms));
7707 set_result(retvalue);
7708 return true;
7709 }
7710
7711 //------------------------------inline_kyberInverseNtt
7712 bool LibraryCallKit::inline_kyberInverseNtt() {
7713 address stubAddr;
7714 const char *stubName;
7715 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
7716 assert(callee()->signature()->size() == 2, "kyberInverseNtt has 2 parameters");
7717
7718 stubAddr = StubRoutines::kyberInverseNtt();
7719 stubName = "kyberInverseNtt";
7720 if (!stubAddr) return false;
7721
7722 Node* coeffs = argument(0);
7723 Node* zetas = argument(1);
7724
7725 coeffs = must_be_not_null(coeffs, true);
7726 zetas = must_be_not_null(zetas, true);
7727
7728 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_SHORT);
7729 assert(coeffs_start, "coeffs is null");
7730 Node* zetas_start = array_element_address(zetas, intcon(0), T_SHORT);
7731 assert(zetas_start, "inverseNtt_zetas is null");
7732 Node* kyberInverseNtt = make_runtime_call(RC_LEAF|RC_NO_FP,
7733 OptoRuntime::kyberInverseNtt_Type(),
7734 stubAddr, stubName, TypePtr::BOTTOM,
7735 coeffs_start, zetas_start);
7736
7737 // return an int
7738 Node* retvalue = _gvn.transform(new ProjNode(kyberInverseNtt, TypeFunc::Parms));
7739 set_result(retvalue);
7740 return true;
7741 }
7742
7743 //------------------------------inline_kyberNttMult
7744 bool LibraryCallKit::inline_kyberNttMult() {
7745 address stubAddr;
7746 const char *stubName;
7747 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
7748 assert(callee()->signature()->size() == 4, "kyberNttMult has 4 parameters");
7749
7750 stubAddr = StubRoutines::kyberNttMult();
7751 stubName = "kyberNttMult";
7752 if (!stubAddr) return false;
7753
7754 Node* result = argument(0);
7755 Node* ntta = argument(1);
7756 Node* nttb = argument(2);
7757 Node* zetas = argument(3);
7758
7759 result = must_be_not_null(result, true);
7760 ntta = must_be_not_null(ntta, true);
7761 nttb = must_be_not_null(nttb, true);
7762 zetas = must_be_not_null(zetas, true);
7763
7764 Node* result_start = array_element_address(result, intcon(0), T_SHORT);
7765 assert(result_start, "result is null");
7766 Node* ntta_start = array_element_address(ntta, intcon(0), T_SHORT);
7767 assert(ntta_start, "ntta is null");
7768 Node* nttb_start = array_element_address(nttb, intcon(0), T_SHORT);
7769 assert(nttb_start, "nttb is null");
7770 Node* zetas_start = array_element_address(zetas, intcon(0), T_SHORT);
7771 assert(zetas_start, "nttMult_zetas is null");
7772 Node* kyberNttMult = make_runtime_call(RC_LEAF|RC_NO_FP,
7773 OptoRuntime::kyberNttMult_Type(),
7774 stubAddr, stubName, TypePtr::BOTTOM,
7775 result_start, ntta_start, nttb_start,
7776 zetas_start);
7777
7778 // return an int
7779 Node* retvalue = _gvn.transform(new ProjNode(kyberNttMult, TypeFunc::Parms));
7780 set_result(retvalue);
7781
7782 return true;
7783 }
7784
7785 //------------------------------inline_kyberAddPoly_2
7786 bool LibraryCallKit::inline_kyberAddPoly_2() {
7787 address stubAddr;
7788 const char *stubName;
7789 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
7790 assert(callee()->signature()->size() == 3, "kyberAddPoly_2 has 3 parameters");
7791
7792 stubAddr = StubRoutines::kyberAddPoly_2();
7793 stubName = "kyberAddPoly_2";
7794 if (!stubAddr) return false;
7795
7796 Node* result = argument(0);
7797 Node* a = argument(1);
7798 Node* b = argument(2);
7799
7800 result = must_be_not_null(result, true);
7801 a = must_be_not_null(a, true);
7802 b = must_be_not_null(b, true);
7803
7804 Node* result_start = array_element_address(result, intcon(0), T_SHORT);
7805 assert(result_start, "result is null");
7806 Node* a_start = array_element_address(a, intcon(0), T_SHORT);
7807 assert(a_start, "a is null");
7808 Node* b_start = array_element_address(b, intcon(0), T_SHORT);
7809 assert(b_start, "b is null");
7810 Node* kyberAddPoly_2 = make_runtime_call(RC_LEAF|RC_NO_FP,
7811 OptoRuntime::kyberAddPoly_2_Type(),
7812 stubAddr, stubName, TypePtr::BOTTOM,
7813 result_start, a_start, b_start);
7814 // return an int
7815 Node* retvalue = _gvn.transform(new ProjNode(kyberAddPoly_2, TypeFunc::Parms));
7816 set_result(retvalue);
7817 return true;
7818 }
7819
7820 //------------------------------inline_kyberAddPoly_3
7821 bool LibraryCallKit::inline_kyberAddPoly_3() {
7822 address stubAddr;
7823 const char *stubName;
7824 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
7825 assert(callee()->signature()->size() == 4, "kyberAddPoly_3 has 4 parameters");
7826
7827 stubAddr = StubRoutines::kyberAddPoly_3();
7828 stubName = "kyberAddPoly_3";
7829 if (!stubAddr) return false;
7830
7831 Node* result = argument(0);
7832 Node* a = argument(1);
7833 Node* b = argument(2);
7834 Node* c = argument(3);
7835
7836 result = must_be_not_null(result, true);
7837 a = must_be_not_null(a, true);
7838 b = must_be_not_null(b, true);
7839 c = must_be_not_null(c, true);
7840
7841 Node* result_start = array_element_address(result, intcon(0), T_SHORT);
7842 assert(result_start, "result is null");
7843 Node* a_start = array_element_address(a, intcon(0), T_SHORT);
7844 assert(a_start, "a is null");
7845 Node* b_start = array_element_address(b, intcon(0), T_SHORT);
7846 assert(b_start, "b is null");
7847 Node* c_start = array_element_address(c, intcon(0), T_SHORT);
7848 assert(c_start, "c is null");
7849 Node* kyberAddPoly_3 = make_runtime_call(RC_LEAF|RC_NO_FP,
7850 OptoRuntime::kyberAddPoly_3_Type(),
7851 stubAddr, stubName, TypePtr::BOTTOM,
7852 result_start, a_start, b_start, c_start);
7853 // return an int
7854 Node* retvalue = _gvn.transform(new ProjNode(kyberAddPoly_3, TypeFunc::Parms));
7855 set_result(retvalue);
7856 return true;
7857 }
7858
7859 //------------------------------inline_kyber12To16
7860 bool LibraryCallKit::inline_kyber12To16() {
7861 address stubAddr;
7862 const char *stubName;
7863 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
7864 assert(callee()->signature()->size() == 4, "kyber12To16 has 4 parameters");
7865
7866 stubAddr = StubRoutines::kyber12To16();
7867 stubName = "kyber12To16";
7868 if (!stubAddr) return false;
7869
7870 Node* condensed = argument(0);
7871 Node* condensedOffs = argument(1);
7872 Node* parsed = argument(2);
7873 Node* parsedLength = argument(3);
7874
7875 condensed = must_be_not_null(condensed, true);
7876 parsed = must_be_not_null(parsed, true);
7877
7878 Node* condensed_start = array_element_address(condensed, intcon(0), T_BYTE);
7879 assert(condensed_start, "condensed is null");
7880 Node* parsed_start = array_element_address(parsed, intcon(0), T_SHORT);
7881 assert(parsed_start, "parsed is null");
7882 Node* kyber12To16 = make_runtime_call(RC_LEAF|RC_NO_FP,
7883 OptoRuntime::kyber12To16_Type(),
7884 stubAddr, stubName, TypePtr::BOTTOM,
7885 condensed_start, condensedOffs, parsed_start, parsedLength);
7886 // return an int
7887 Node* retvalue = _gvn.transform(new ProjNode(kyber12To16, TypeFunc::Parms));
7888 set_result(retvalue);
7889 return true;
7890
7891 }
7892
7893 //------------------------------inline_kyberBarrettReduce
7894 bool LibraryCallKit::inline_kyberBarrettReduce() {
7895 address stubAddr;
7896 const char *stubName;
7897 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
7898 assert(callee()->signature()->size() == 1, "kyberBarrettReduce has 1 parameters");
7899
7900 stubAddr = StubRoutines::kyberBarrettReduce();
7901 stubName = "kyberBarrettReduce";
7902 if (!stubAddr) return false;
7903
7904 Node* coeffs = argument(0);
7905
7906 coeffs = must_be_not_null(coeffs, true);
7907
7908 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_SHORT);
7909 assert(coeffs_start, "coeffs is null");
7910 Node* kyberBarrettReduce = make_runtime_call(RC_LEAF|RC_NO_FP,
7911 OptoRuntime::kyberBarrettReduce_Type(),
7912 stubAddr, stubName, TypePtr::BOTTOM,
7913 coeffs_start);
7914 // return an int
7915 Node* retvalue = _gvn.transform(new ProjNode(kyberBarrettReduce, TypeFunc::Parms));
7916 set_result(retvalue);
7917 return true;
7918 }
7919
7920 //------------------------------inline_dilithiumAlmostNtt
7921 bool LibraryCallKit::inline_dilithiumAlmostNtt() {
7922 address stubAddr;
7923 const char *stubName;
7924 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
7925 assert(callee()->signature()->size() == 2, "dilithiumAlmostNtt has 2 parameters");
7926
7927 stubAddr = StubRoutines::dilithiumAlmostNtt();
7928 stubName = "dilithiumAlmostNtt";
7929 if (!stubAddr) return false;
7930
7931 Node* coeffs = argument(0);
7932 Node* ntt_zetas = argument(1);
7933
7934 coeffs = must_be_not_null(coeffs, true);
7935 ntt_zetas = must_be_not_null(ntt_zetas, true);
7936
7937 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_INT);
7938 assert(coeffs_start, "coeffs is null");
7939 Node* ntt_zetas_start = array_element_address(ntt_zetas, intcon(0), T_INT);
7940 assert(ntt_zetas_start, "ntt_zetas is null");
7941 Node* dilithiumAlmostNtt = make_runtime_call(RC_LEAF|RC_NO_FP,
7942 OptoRuntime::dilithiumAlmostNtt_Type(),
7943 stubAddr, stubName, TypePtr::BOTTOM,
7944 coeffs_start, ntt_zetas_start);
7945 // return an int
7946 Node* retvalue = _gvn.transform(new ProjNode(dilithiumAlmostNtt, TypeFunc::Parms));
7947 set_result(retvalue);
7948 return true;
7949 }
7950
7951 //------------------------------inline_dilithiumAlmostInverseNtt
7952 bool LibraryCallKit::inline_dilithiumAlmostInverseNtt() {
7953 address stubAddr;
7954 const char *stubName;
7955 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
7956 assert(callee()->signature()->size() == 2, "dilithiumAlmostInverseNtt has 2 parameters");
7957
7958 stubAddr = StubRoutines::dilithiumAlmostInverseNtt();
7959 stubName = "dilithiumAlmostInverseNtt";
7960 if (!stubAddr) return false;
7961
7962 Node* coeffs = argument(0);
7963 Node* zetas = argument(1);
7964
7965 coeffs = must_be_not_null(coeffs, true);
7966 zetas = must_be_not_null(zetas, true);
7967
7968 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_INT);
7969 assert(coeffs_start, "coeffs is null");
7970 Node* zetas_start = array_element_address(zetas, intcon(0), T_INT);
7971 assert(zetas_start, "inverseNtt_zetas is null");
7972 Node* dilithiumAlmostInverseNtt = make_runtime_call(RC_LEAF|RC_NO_FP,
7973 OptoRuntime::dilithiumAlmostInverseNtt_Type(),
7974 stubAddr, stubName, TypePtr::BOTTOM,
7975 coeffs_start, zetas_start);
7976 // return an int
7977 Node* retvalue = _gvn.transform(new ProjNode(dilithiumAlmostInverseNtt, TypeFunc::Parms));
7978 set_result(retvalue);
7979 return true;
7980 }
7981
7982 //------------------------------inline_dilithiumNttMult
7983 bool LibraryCallKit::inline_dilithiumNttMult() {
7984 address stubAddr;
7985 const char *stubName;
7986 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
7987 assert(callee()->signature()->size() == 3, "dilithiumNttMult has 3 parameters");
7988
7989 stubAddr = StubRoutines::dilithiumNttMult();
7990 stubName = "dilithiumNttMult";
7991 if (!stubAddr) return false;
7992
7993 Node* result = argument(0);
7994 Node* ntta = argument(1);
7995 Node* nttb = argument(2);
7996 Node* zetas = argument(3);
7997
7998 result = must_be_not_null(result, true);
7999 ntta = must_be_not_null(ntta, true);
8000 nttb = must_be_not_null(nttb, true);
8001 zetas = must_be_not_null(zetas, true);
8002
8003 Node* result_start = array_element_address(result, intcon(0), T_INT);
8004 assert(result_start, "result is null");
8005 Node* ntta_start = array_element_address(ntta, intcon(0), T_INT);
8006 assert(ntta_start, "ntta is null");
8007 Node* nttb_start = array_element_address(nttb, intcon(0), T_INT);
8008 assert(nttb_start, "nttb is null");
8009 Node* dilithiumNttMult = make_runtime_call(RC_LEAF|RC_NO_FP,
8010 OptoRuntime::dilithiumNttMult_Type(),
8011 stubAddr, stubName, TypePtr::BOTTOM,
8012 result_start, ntta_start, nttb_start);
8013
8014 // return an int
8015 Node* retvalue = _gvn.transform(new ProjNode(dilithiumNttMult, TypeFunc::Parms));
8016 set_result(retvalue);
8017
8018 return true;
8019 }
8020
8021 //------------------------------inline_dilithiumMontMulByConstant
8022 bool LibraryCallKit::inline_dilithiumMontMulByConstant() {
8023 address stubAddr;
8024 const char *stubName;
8025 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
8026 assert(callee()->signature()->size() == 2, "dilithiumMontMulByConstant has 2 parameters");
8027
8028 stubAddr = StubRoutines::dilithiumMontMulByConstant();
8029 stubName = "dilithiumMontMulByConstant";
8030 if (!stubAddr) return false;
8031
8032 Node* coeffs = argument(0);
8033 Node* constant = argument(1);
8034
8035 coeffs = must_be_not_null(coeffs, true);
8036
8037 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_INT);
8038 assert(coeffs_start, "coeffs is null");
8039 Node* dilithiumMontMulByConstant = make_runtime_call(RC_LEAF|RC_NO_FP,
8040 OptoRuntime::dilithiumMontMulByConstant_Type(),
8041 stubAddr, stubName, TypePtr::BOTTOM,
8042 coeffs_start, constant);
8043
8044 // return an int
8045 Node* retvalue = _gvn.transform(new ProjNode(dilithiumMontMulByConstant, TypeFunc::Parms));
8046 set_result(retvalue);
8047 return true;
8048 }
8049
8050
8051 //------------------------------inline_dilithiumDecomposePoly
8052 bool LibraryCallKit::inline_dilithiumDecomposePoly() {
8053 address stubAddr;
8054 const char *stubName;
8055 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
8056 assert(callee()->signature()->size() == 5, "dilithiumDecomposePoly has 5 parameters");
8057
8058 stubAddr = StubRoutines::dilithiumDecomposePoly();
8059 stubName = "dilithiumDecomposePoly";
8060 if (!stubAddr) return false;
8061
8062 Node* input = argument(0);
8063 Node* lowPart = argument(1);
8064 Node* highPart = argument(2);
8065 Node* twoGamma2 = argument(3);
8066 Node* multiplier = argument(4);
8067
8068 input = must_be_not_null(input, true);
8069 lowPart = must_be_not_null(lowPart, true);
8070 highPart = must_be_not_null(highPart, true);
8071
8072 Node* input_start = array_element_address(input, intcon(0), T_INT);
8073 assert(input_start, "input is null");
8074 Node* lowPart_start = array_element_address(lowPart, intcon(0), T_INT);
8075 assert(lowPart_start, "lowPart is null");
8076 Node* highPart_start = array_element_address(highPart, intcon(0), T_INT);
8077 assert(highPart_start, "highPart is null");
8078
8079 Node* dilithiumDecomposePoly = make_runtime_call(RC_LEAF|RC_NO_FP,
8080 OptoRuntime::dilithiumDecomposePoly_Type(),
8081 stubAddr, stubName, TypePtr::BOTTOM,
8082 input_start, lowPart_start, highPart_start,
8083 twoGamma2, multiplier);
8084
8085 // return an int
8086 Node* retvalue = _gvn.transform(new ProjNode(dilithiumDecomposePoly, TypeFunc::Parms));
8087 set_result(retvalue);
8088 return true;
8089 }
8090
8091 bool LibraryCallKit::inline_base64_encodeBlock() {
8092 address stubAddr;
8093 const char *stubName;
8094 assert(UseBASE64Intrinsics, "need Base64 intrinsics support");
8095 assert(callee()->signature()->size() == 6, "base64_encodeBlock has 6 parameters");
8096 stubAddr = StubRoutines::base64_encodeBlock();
8097 stubName = "encodeBlock";
8098
8099 if (!stubAddr) return false;
8100 Node* base64obj = argument(0);
8101 Node* src = argument(1);
8102 Node* offset = argument(2);
8103 Node* len = argument(3);
8104 Node* dest = argument(4);
8105 Node* dp = argument(5);
8106 Node* isURL = argument(6);
8107
8108 src = must_be_not_null(src, true);
8109 dest = must_be_not_null(dest, true);
8110
8111 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
8112 assert(src_start, "source array is null");
8113 Node* dest_start = array_element_address(dest, intcon(0), T_BYTE);
8114 assert(dest_start, "destination array is null");
8115
8116 Node* base64 = make_runtime_call(RC_LEAF,
8117 OptoRuntime::base64_encodeBlock_Type(),
8118 stubAddr, stubName, TypePtr::BOTTOM,
8119 src_start, offset, len, dest_start, dp, isURL);
8120 return true;
8121 }
8122
8123 bool LibraryCallKit::inline_base64_decodeBlock() {
8124 address stubAddr;
8125 const char *stubName;
8126 assert(UseBASE64Intrinsics, "need Base64 intrinsics support");
8127 assert(callee()->signature()->size() == 7, "base64_decodeBlock has 7 parameters");
8128 stubAddr = StubRoutines::base64_decodeBlock();
8129 stubName = "decodeBlock";
8130
8131 if (!stubAddr) return false;
8132 Node* base64obj = argument(0);
8133 Node* src = argument(1);
8134 Node* src_offset = argument(2);
8135 Node* len = argument(3);
8136 Node* dest = argument(4);
8137 Node* dest_offset = argument(5);
8138 Node* isURL = argument(6);
8139 Node* isMIME = argument(7);
8140
8141 src = must_be_not_null(src, true);
8142 dest = must_be_not_null(dest, true);
8143
8144 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
8145 assert(src_start, "source array is null");
8146 Node* dest_start = array_element_address(dest, intcon(0), T_BYTE);
8147 assert(dest_start, "destination array is null");
8148
8149 Node* call = make_runtime_call(RC_LEAF,
8150 OptoRuntime::base64_decodeBlock_Type(),
8151 stubAddr, stubName, TypePtr::BOTTOM,
8152 src_start, src_offset, len, dest_start, dest_offset, isURL, isMIME);
8153 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
8154 set_result(result);
8155 return true;
8156 }
8157
8158 bool LibraryCallKit::inline_poly1305_processBlocks() {
8159 address stubAddr;
8160 const char *stubName;
8161 assert(UsePoly1305Intrinsics, "need Poly intrinsics support");
8162 assert(callee()->signature()->size() == 5, "poly1305_processBlocks has %d parameters", callee()->signature()->size());
8163 stubAddr = StubRoutines::poly1305_processBlocks();
8164 stubName = "poly1305_processBlocks";
8165
8166 if (!stubAddr) return false;
8167 null_check_receiver(); // null-check receiver
8168 if (stopped()) return true;
8169
8170 Node* input = argument(1);
8171 Node* input_offset = argument(2);
8172 Node* len = argument(3);
8173 Node* alimbs = argument(4);
8174 Node* rlimbs = argument(5);
8175
8176 input = must_be_not_null(input, true);
8177 alimbs = must_be_not_null(alimbs, true);
8178 rlimbs = must_be_not_null(rlimbs, true);
8179
8180 Node* input_start = array_element_address(input, input_offset, T_BYTE);
8181 assert(input_start, "input array is null");
8182 Node* acc_start = array_element_address(alimbs, intcon(0), T_LONG);
8183 assert(acc_start, "acc array is null");
8184 Node* r_start = array_element_address(rlimbs, intcon(0), T_LONG);
8185 assert(r_start, "r array is null");
8186
8187 Node* call = make_runtime_call(RC_LEAF | RC_NO_FP,
8188 OptoRuntime::poly1305_processBlocks_Type(),
8189 stubAddr, stubName, TypePtr::BOTTOM,
8190 input_start, len, acc_start, r_start);
8191 return true;
8192 }
8193
8194 bool LibraryCallKit::inline_intpoly_montgomeryMult_P256() {
8195 address stubAddr;
8196 const char *stubName;
8197 assert(UseIntPolyIntrinsics, "need intpoly intrinsics support");
8198 assert(callee()->signature()->size() == 3, "intpoly_montgomeryMult_P256 has %d parameters", callee()->signature()->size());
8199 stubAddr = StubRoutines::intpoly_montgomeryMult_P256();
8200 stubName = "intpoly_montgomeryMult_P256";
8201
8202 if (!stubAddr) return false;
8203 null_check_receiver(); // null-check receiver
8204 if (stopped()) return true;
8205
8206 Node* a = argument(1);
8207 Node* b = argument(2);
8208 Node* r = argument(3);
8209
8210 a = must_be_not_null(a, true);
8211 b = must_be_not_null(b, true);
8212 r = must_be_not_null(r, true);
8213
8214 Node* a_start = array_element_address(a, intcon(0), T_LONG);
8215 assert(a_start, "a array is null");
8216 Node* b_start = array_element_address(b, intcon(0), T_LONG);
8217 assert(b_start, "b array is null");
8218 Node* r_start = array_element_address(r, intcon(0), T_LONG);
8219 assert(r_start, "r array is null");
8220
8221 Node* call = make_runtime_call(RC_LEAF | RC_NO_FP,
8222 OptoRuntime::intpoly_montgomeryMult_P256_Type(),
8223 stubAddr, stubName, TypePtr::BOTTOM,
8224 a_start, b_start, r_start);
8225 return true;
8226 }
8227
8228 bool LibraryCallKit::inline_intpoly_assign() {
8229 assert(UseIntPolyIntrinsics, "need intpoly intrinsics support");
8230 assert(callee()->signature()->size() == 3, "intpoly_assign has %d parameters", callee()->signature()->size());
8231 const char *stubName = "intpoly_assign";
8232 address stubAddr = StubRoutines::intpoly_assign();
8233 if (!stubAddr) return false;
8234
8235 Node* set = argument(0);
8236 Node* a = argument(1);
8237 Node* b = argument(2);
8238 Node* arr_length = load_array_length(a);
8239
8240 a = must_be_not_null(a, true);
8241 b = must_be_not_null(b, true);
8242
8243 Node* a_start = array_element_address(a, intcon(0), T_LONG);
8244 assert(a_start, "a array is null");
8245 Node* b_start = array_element_address(b, intcon(0), T_LONG);
8246 assert(b_start, "b array is null");
8247
8248 Node* call = make_runtime_call(RC_LEAF | RC_NO_FP,
8249 OptoRuntime::intpoly_assign_Type(),
8250 stubAddr, stubName, TypePtr::BOTTOM,
8251 set, a_start, b_start, arr_length);
8252 return true;
8253 }
8254
8255 //------------------------------inline_digestBase_implCompress-----------------------
8256 //
8257 // Calculate MD5 for single-block byte[] array.
8258 // void com.sun.security.provider.MD5.implCompress(byte[] buf, int ofs)
8259 //
8260 // Calculate SHA (i.e., SHA-1) for single-block byte[] array.
8261 // void com.sun.security.provider.SHA.implCompress(byte[] buf, int ofs)
8262 //
8263 // Calculate SHA2 (i.e., SHA-244 or SHA-256) for single-block byte[] array.
8264 // void com.sun.security.provider.SHA2.implCompress(byte[] buf, int ofs)
8265 //
8266 // Calculate SHA5 (i.e., SHA-384 or SHA-512) for single-block byte[] array.
8267 // void com.sun.security.provider.SHA5.implCompress(byte[] buf, int ofs)
8268 //
8269 // Calculate SHA3 (i.e., SHA3-224 or SHA3-256 or SHA3-384 or SHA3-512) for single-block byte[] array.
8270 // void com.sun.security.provider.SHA3.implCompress(byte[] buf, int ofs)
8271 //
8272 bool LibraryCallKit::inline_digestBase_implCompress(vmIntrinsics::ID id) {
8273 assert(callee()->signature()->size() == 2, "sha_implCompress has 2 parameters");
8274
8275 Node* digestBase_obj = argument(0);
8276 Node* src = argument(1); // type oop
8277 Node* ofs = argument(2); // type int
8278
8279 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
8280 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
8281 // failed array check
8282 return false;
8283 }
8284 // Figure out the size and type of the elements we will be copying.
8285 BasicType src_elem = src_type->elem()->array_element_basic_type();
8286 if (src_elem != T_BYTE) {
8287 return false;
8288 }
8289 // 'src_start' points to src array + offset
8290 src = must_be_not_null(src, true);
8291 Node* src_start = array_element_address(src, ofs, src_elem);
8292 Node* state = nullptr;
8293 Node* block_size = nullptr;
8294 address stubAddr;
8295 const char *stubName;
8296
8297 switch(id) {
8298 case vmIntrinsics::_md5_implCompress:
8299 assert(UseMD5Intrinsics, "need MD5 instruction support");
8300 state = get_state_from_digest_object(digestBase_obj, T_INT);
8301 stubAddr = StubRoutines::md5_implCompress();
8302 stubName = "md5_implCompress";
8303 break;
8304 case vmIntrinsics::_sha_implCompress:
8305 assert(UseSHA1Intrinsics, "need SHA1 instruction support");
8306 state = get_state_from_digest_object(digestBase_obj, T_INT);
8307 stubAddr = StubRoutines::sha1_implCompress();
8308 stubName = "sha1_implCompress";
8309 break;
8310 case vmIntrinsics::_sha2_implCompress:
8311 assert(UseSHA256Intrinsics, "need SHA256 instruction support");
8312 state = get_state_from_digest_object(digestBase_obj, T_INT);
8313 stubAddr = StubRoutines::sha256_implCompress();
8314 stubName = "sha256_implCompress";
8315 break;
8316 case vmIntrinsics::_sha5_implCompress:
8317 assert(UseSHA512Intrinsics, "need SHA512 instruction support");
8318 state = get_state_from_digest_object(digestBase_obj, T_LONG);
8319 stubAddr = StubRoutines::sha512_implCompress();
8320 stubName = "sha512_implCompress";
8321 break;
8322 case vmIntrinsics::_sha3_implCompress:
8323 assert(UseSHA3Intrinsics, "need SHA3 instruction support");
8324 state = get_state_from_digest_object(digestBase_obj, T_LONG);
8325 stubAddr = StubRoutines::sha3_implCompress();
8326 stubName = "sha3_implCompress";
8327 block_size = get_block_size_from_digest_object(digestBase_obj);
8328 if (block_size == nullptr) return false;
8329 break;
8330 default:
8331 fatal_unexpected_iid(id);
8332 return false;
8333 }
8334 if (state == nullptr) return false;
8335
8336 assert(stubAddr != nullptr, "Stub %s is not generated", stubName);
8337 if (stubAddr == nullptr) return false;
8338
8339 // Call the stub.
8340 Node* call;
8341 if (block_size == nullptr) {
8342 call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::digestBase_implCompress_Type(false),
8343 stubAddr, stubName, TypePtr::BOTTOM,
8344 src_start, state);
8345 } else {
8346 call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::digestBase_implCompress_Type(true),
8347 stubAddr, stubName, TypePtr::BOTTOM,
8348 src_start, state, block_size);
8349 }
8350
8351 return true;
8352 }
8353
8354 //------------------------------inline_double_keccak
8355 bool LibraryCallKit::inline_double_keccak() {
8356 address stubAddr;
8357 const char *stubName;
8358 assert(UseSHA3Intrinsics, "need SHA3 intrinsics support");
8359 assert(callee()->signature()->size() == 2, "double_keccak has 2 parameters");
8360
8361 stubAddr = StubRoutines::double_keccak();
8362 stubName = "double_keccak";
8363 if (!stubAddr) return false;
8364
8365 Node* status0 = argument(0);
8366 Node* status1 = argument(1);
8367
8368 status0 = must_be_not_null(status0, true);
8369 status1 = must_be_not_null(status1, true);
8370
8371 Node* status0_start = array_element_address(status0, intcon(0), T_LONG);
8372 assert(status0_start, "status0 is null");
8373 Node* status1_start = array_element_address(status1, intcon(0), T_LONG);
8374 assert(status1_start, "status1 is null");
8375 Node* double_keccak = make_runtime_call(RC_LEAF|RC_NO_FP,
8376 OptoRuntime::double_keccak_Type(),
8377 stubAddr, stubName, TypePtr::BOTTOM,
8378 status0_start, status1_start);
8379 // return an int
8380 Node* retvalue = _gvn.transform(new ProjNode(double_keccak, TypeFunc::Parms));
8381 set_result(retvalue);
8382 return true;
8383 }
8384
8385
8386 //------------------------------inline_digestBase_implCompressMB-----------------------
8387 //
8388 // Calculate MD5/SHA/SHA2/SHA5/SHA3 for multi-block byte[] array.
8389 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
8390 //
8391 bool LibraryCallKit::inline_digestBase_implCompressMB(int predicate) {
8392 assert(UseMD5Intrinsics || UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics || UseSHA3Intrinsics,
8393 "need MD5/SHA1/SHA256/SHA512/SHA3 instruction support");
8394 assert((uint)predicate < 5, "sanity");
8395 assert(callee()->signature()->size() == 3, "digestBase_implCompressMB has 3 parameters");
8396
8397 Node* digestBase_obj = argument(0); // The receiver was checked for null already.
8398 Node* src = argument(1); // byte[] array
8399 Node* ofs = argument(2); // type int
8400 Node* limit = argument(3); // type int
8401
8402 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
8403 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
8404 // failed array check
8405 return false;
8406 }
8407 // Figure out the size and type of the elements we will be copying.
8408 BasicType src_elem = src_type->elem()->array_element_basic_type();
8409 if (src_elem != T_BYTE) {
8410 return false;
8411 }
8412 // 'src_start' points to src array + offset
8413 src = must_be_not_null(src, false);
8414 Node* src_start = array_element_address(src, ofs, src_elem);
8415
8416 const char* klass_digestBase_name = nullptr;
8417 const char* stub_name = nullptr;
8418 address stub_addr = nullptr;
8419 BasicType elem_type = T_INT;
8420
8421 switch (predicate) {
8422 case 0:
8423 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_md5_implCompress)) {
8424 klass_digestBase_name = "sun/security/provider/MD5";
8425 stub_name = "md5_implCompressMB";
8426 stub_addr = StubRoutines::md5_implCompressMB();
8427 }
8428 break;
8429 case 1:
8430 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha_implCompress)) {
8431 klass_digestBase_name = "sun/security/provider/SHA";
8432 stub_name = "sha1_implCompressMB";
8433 stub_addr = StubRoutines::sha1_implCompressMB();
8434 }
8435 break;
8436 case 2:
8437 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha2_implCompress)) {
8438 klass_digestBase_name = "sun/security/provider/SHA2";
8439 stub_name = "sha256_implCompressMB";
8440 stub_addr = StubRoutines::sha256_implCompressMB();
8441 }
8442 break;
8443 case 3:
8444 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha5_implCompress)) {
8445 klass_digestBase_name = "sun/security/provider/SHA5";
8446 stub_name = "sha512_implCompressMB";
8447 stub_addr = StubRoutines::sha512_implCompressMB();
8448 elem_type = T_LONG;
8449 }
8450 break;
8451 case 4:
8452 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha3_implCompress)) {
8453 klass_digestBase_name = "sun/security/provider/SHA3";
8454 stub_name = "sha3_implCompressMB";
8455 stub_addr = StubRoutines::sha3_implCompressMB();
8456 elem_type = T_LONG;
8457 }
8458 break;
8459 default:
8460 fatal("unknown DigestBase intrinsic predicate: %d", predicate);
8461 }
8462 if (klass_digestBase_name != nullptr) {
8463 assert(stub_addr != nullptr, "Stub is generated");
8464 if (stub_addr == nullptr) return false;
8465
8466 // get DigestBase klass to lookup for SHA klass
8467 const TypeInstPtr* tinst = _gvn.type(digestBase_obj)->isa_instptr();
8468 assert(tinst != nullptr, "digestBase_obj is not instance???");
8469 assert(tinst->is_loaded(), "DigestBase is not loaded");
8470
8471 ciKlass* klass_digestBase = tinst->instance_klass()->find_klass(ciSymbol::make(klass_digestBase_name));
8472 assert(klass_digestBase->is_loaded(), "predicate checks that this class is loaded");
8473 ciInstanceKlass* instklass_digestBase = klass_digestBase->as_instance_klass();
8474 return inline_digestBase_implCompressMB(digestBase_obj, instklass_digestBase, elem_type, stub_addr, stub_name, src_start, ofs, limit);
8475 }
8476 return false;
8477 }
8478
8479 //------------------------------inline_digestBase_implCompressMB-----------------------
8480 bool LibraryCallKit::inline_digestBase_implCompressMB(Node* digestBase_obj, ciInstanceKlass* instklass_digestBase,
8481 BasicType elem_type, address stubAddr, const char *stubName,
8482 Node* src_start, Node* ofs, Node* limit) {
8483 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_digestBase);
8484 const TypeOopPtr* xtype = aklass->cast_to_exactness(false)->as_instance_type()->cast_to_ptr_type(TypePtr::NotNull);
8485 Node* digest_obj = new CheckCastPPNode(control(), digestBase_obj, xtype);
8486 digest_obj = _gvn.transform(digest_obj);
8487
8488 Node* state = get_state_from_digest_object(digest_obj, elem_type);
8489 if (state == nullptr) return false;
8490
8491 Node* block_size = nullptr;
8492 if (strcmp("sha3_implCompressMB", stubName) == 0) {
8493 block_size = get_block_size_from_digest_object(digest_obj);
8494 if (block_size == nullptr) return false;
8495 }
8496
8497 // Call the stub.
8498 Node* call;
8499 if (block_size == nullptr) {
8500 call = make_runtime_call(RC_LEAF|RC_NO_FP,
8501 OptoRuntime::digestBase_implCompressMB_Type(false),
8502 stubAddr, stubName, TypePtr::BOTTOM,
8503 src_start, state, ofs, limit);
8504 } else {
8505 call = make_runtime_call(RC_LEAF|RC_NO_FP,
8506 OptoRuntime::digestBase_implCompressMB_Type(true),
8507 stubAddr, stubName, TypePtr::BOTTOM,
8508 src_start, state, block_size, ofs, limit);
8509 }
8510
8511 // return ofs (int)
8512 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
8513 set_result(result);
8514
8515 return true;
8516 }
8517
8518 //------------------------------inline_galoisCounterMode_AESCrypt-----------------------
8519 bool LibraryCallKit::inline_galoisCounterMode_AESCrypt() {
8520 assert(UseAES, "need AES instruction support");
8521 address stubAddr = nullptr;
8522 const char *stubName = nullptr;
8523 stubAddr = StubRoutines::galoisCounterMode_AESCrypt();
8524 stubName = "galoisCounterMode_AESCrypt";
8525
8526 if (stubAddr == nullptr) return false;
8527
8528 Node* in = argument(0);
8529 Node* inOfs = argument(1);
8530 Node* len = argument(2);
8531 Node* ct = argument(3);
8532 Node* ctOfs = argument(4);
8533 Node* out = argument(5);
8534 Node* outOfs = argument(6);
8535 Node* gctr_object = argument(7);
8536 Node* ghash_object = argument(8);
8537
8538 // (1) in, ct and out are arrays.
8539 const TypeAryPtr* in_type = in->Value(&_gvn)->isa_aryptr();
8540 const TypeAryPtr* ct_type = ct->Value(&_gvn)->isa_aryptr();
8541 const TypeAryPtr* out_type = out->Value(&_gvn)->isa_aryptr();
8542 assert( in_type != nullptr && in_type->elem() != Type::BOTTOM &&
8543 ct_type != nullptr && ct_type->elem() != Type::BOTTOM &&
8544 out_type != nullptr && out_type->elem() != Type::BOTTOM, "args are strange");
8545
8546 // checks are the responsibility of the caller
8547 Node* in_start = in;
8548 Node* ct_start = ct;
8549 Node* out_start = out;
8550 if (inOfs != nullptr || ctOfs != nullptr || outOfs != nullptr) {
8551 assert(inOfs != nullptr && ctOfs != nullptr && outOfs != nullptr, "");
8552 in_start = array_element_address(in, inOfs, T_BYTE);
8553 ct_start = array_element_address(ct, ctOfs, T_BYTE);
8554 out_start = array_element_address(out, outOfs, T_BYTE);
8555 }
8556
8557 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
8558 // (because of the predicated logic executed earlier).
8559 // so we cast it here safely.
8560 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
8561 Node* embeddedCipherObj = load_field_from_object(gctr_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
8562 Node* counter = load_field_from_object(gctr_object, "counter", "[B");
8563 Node* subkeyHtbl = load_field_from_object(ghash_object, "subkeyHtbl", "[J");
8564 Node* state = load_field_from_object(ghash_object, "state", "[J");
8565
8566 if (embeddedCipherObj == nullptr || counter == nullptr || subkeyHtbl == nullptr || state == nullptr) {
8567 return false;
8568 }
8569 // cast it to what we know it will be at runtime
8570 const TypeInstPtr* tinst = _gvn.type(gctr_object)->isa_instptr();
8571 assert(tinst != nullptr, "GCTR obj is null");
8572 assert(tinst->is_loaded(), "GCTR obj is not loaded");
8573 ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
8574 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
8575 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
8576 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
8577 const TypeOopPtr* xtype = aklass->as_instance_type();
8578 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
8579 aescrypt_object = _gvn.transform(aescrypt_object);
8580 // we need to get the start of the aescrypt_object's expanded key array
8581 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, /* is_decrypt */ false);
8582 if (k_start == nullptr) return false;
8583 // similarly, get the start address of the r vector
8584 Node* cnt_start = array_element_address(counter, intcon(0), T_BYTE);
8585 Node* state_start = array_element_address(state, intcon(0), T_LONG);
8586 Node* subkeyHtbl_start = array_element_address(subkeyHtbl, intcon(0), T_LONG);
8587
8588
8589 // Call the stub, passing params
8590 Node* gcmCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
8591 OptoRuntime::galoisCounterMode_aescrypt_Type(),
8592 stubAddr, stubName, TypePtr::BOTTOM,
8593 in_start, len, ct_start, out_start, k_start, state_start, subkeyHtbl_start, cnt_start);
8594
8595 // return cipher length (int)
8596 Node* retvalue = _gvn.transform(new ProjNode(gcmCrypt, TypeFunc::Parms));
8597 set_result(retvalue);
8598
8599 return true;
8600 }
8601
8602 //----------------------------inline_galoisCounterMode_AESCrypt_predicate----------------------------
8603 // Return node representing slow path of predicate check.
8604 // the pseudo code we want to emulate with this predicate is:
8605 // for encryption:
8606 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
8607 // for decryption:
8608 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
8609 // note cipher==plain is more conservative than the original java code but that's OK
8610 //
8611
8612 Node* LibraryCallKit::inline_galoisCounterMode_AESCrypt_predicate() {
8613 // The receiver was checked for null already.
8614 Node* objGCTR = argument(7);
8615 // Load embeddedCipher field of GCTR object.
8616 Node* embeddedCipherObj = load_field_from_object(objGCTR, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
8617 assert(embeddedCipherObj != nullptr, "embeddedCipherObj is null");
8618
8619 // get AESCrypt klass for instanceOf check
8620 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
8621 // will have same classloader as CipherBlockChaining object
8622 const TypeInstPtr* tinst = _gvn.type(objGCTR)->isa_instptr();
8623 assert(tinst != nullptr, "GCTR obj is null");
8624 assert(tinst->is_loaded(), "GCTR obj is not loaded");
8625
8626 // we want to do an instanceof comparison against the AESCrypt class
8627 ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
8628 if (!klass_AESCrypt->is_loaded()) {
8629 // if AESCrypt is not even loaded, we never take the intrinsic fast path
8630 Node* ctrl = control();
8631 set_control(top()); // no regular fast path
8632 return ctrl;
8633 }
8634
8635 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
8636 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
8637 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
8638 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
8639 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
8640
8641 return instof_false; // even if it is null
8642 }
8643
8644 //------------------------------get_state_from_digest_object-----------------------
8645 Node * LibraryCallKit::get_state_from_digest_object(Node *digest_object, BasicType elem_type) {
8646 const char* state_type;
8647 switch (elem_type) {
8648 case T_BYTE: state_type = "[B"; break;
8649 case T_INT: state_type = "[I"; break;
8650 case T_LONG: state_type = "[J"; break;
8651 default: ShouldNotReachHere();
8652 }
8653 Node* digest_state = load_field_from_object(digest_object, "state", state_type);
8654 assert (digest_state != nullptr, "wrong version of sun.security.provider.MD5/SHA/SHA2/SHA5/SHA3");
8655 if (digest_state == nullptr) return (Node *) nullptr;
8656
8657 // now have the array, need to get the start address of the state array
8658 Node* state = array_element_address(digest_state, intcon(0), elem_type);
8659 return state;
8660 }
8661
8662 //------------------------------get_block_size_from_sha3_object----------------------------------
8663 Node * LibraryCallKit::get_block_size_from_digest_object(Node *digest_object) {
8664 Node* block_size = load_field_from_object(digest_object, "blockSize", "I");
8665 assert (block_size != nullptr, "sanity");
8666 return block_size;
8667 }
8668
8669 //----------------------------inline_digestBase_implCompressMB_predicate----------------------------
8670 // Return node representing slow path of predicate check.
8671 // the pseudo code we want to emulate with this predicate is:
8672 // if (digestBaseObj instanceof MD5/SHA/SHA2/SHA5/SHA3) do_intrinsic, else do_javapath
8673 //
8674 Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) {
8675 assert(UseMD5Intrinsics || UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics || UseSHA3Intrinsics,
8676 "need MD5/SHA1/SHA256/SHA512/SHA3 instruction support");
8677 assert((uint)predicate < 5, "sanity");
8678
8679 // The receiver was checked for null already.
8680 Node* digestBaseObj = argument(0);
8681
8682 // get DigestBase klass for instanceOf check
8683 const TypeInstPtr* tinst = _gvn.type(digestBaseObj)->isa_instptr();
8684 assert(tinst != nullptr, "digestBaseObj is null");
8685 assert(tinst->is_loaded(), "DigestBase is not loaded");
8686
8687 const char* klass_name = nullptr;
8688 switch (predicate) {
8689 case 0:
8690 if (UseMD5Intrinsics) {
8691 // we want to do an instanceof comparison against the MD5 class
8692 klass_name = "sun/security/provider/MD5";
8693 }
8694 break;
8695 case 1:
8696 if (UseSHA1Intrinsics) {
8697 // we want to do an instanceof comparison against the SHA class
8698 klass_name = "sun/security/provider/SHA";
8699 }
8700 break;
8701 case 2:
8702 if (UseSHA256Intrinsics) {
8703 // we want to do an instanceof comparison against the SHA2 class
8704 klass_name = "sun/security/provider/SHA2";
8705 }
8706 break;
8707 case 3:
8708 if (UseSHA512Intrinsics) {
8709 // we want to do an instanceof comparison against the SHA5 class
8710 klass_name = "sun/security/provider/SHA5";
8711 }
8712 break;
8713 case 4:
8714 if (UseSHA3Intrinsics) {
8715 // we want to do an instanceof comparison against the SHA3 class
8716 klass_name = "sun/security/provider/SHA3";
8717 }
8718 break;
8719 default:
8720 fatal("unknown SHA intrinsic predicate: %d", predicate);
8721 }
8722
8723 ciKlass* klass = nullptr;
8724 if (klass_name != nullptr) {
8725 klass = tinst->instance_klass()->find_klass(ciSymbol::make(klass_name));
8726 }
8727 if ((klass == nullptr) || !klass->is_loaded()) {
8728 // if none of MD5/SHA/SHA2/SHA5 is loaded, we never take the intrinsic fast path
8729 Node* ctrl = control();
8730 set_control(top()); // no intrinsic path
8731 return ctrl;
8732 }
8733 ciInstanceKlass* instklass = klass->as_instance_klass();
8734
8735 Node* instof = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass)));
8736 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
8737 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
8738 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
8739
8740 return instof_false; // even if it is null
8741 }
8742
8743 //-------------inline_fma-----------------------------------
8744 bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) {
8745 Node *a = nullptr;
8746 Node *b = nullptr;
8747 Node *c = nullptr;
8748 Node* result = nullptr;
8749 switch (id) {
8750 case vmIntrinsics::_fmaD:
8751 assert(callee()->signature()->size() == 6, "fma has 3 parameters of size 2 each.");
8752 // no receiver since it is static method
8753 a = argument(0);
8754 b = argument(2);
8755 c = argument(4);
8756 result = _gvn.transform(new FmaDNode(a, b, c));
8757 break;
8758 case vmIntrinsics::_fmaF:
8759 assert(callee()->signature()->size() == 3, "fma has 3 parameters of size 1 each.");
8760 a = argument(0);
8761 b = argument(1);
8762 c = argument(2);
8763 result = _gvn.transform(new FmaFNode(a, b, c));
8764 break;
8765 default:
8766 fatal_unexpected_iid(id); break;
8767 }
8768 set_result(result);
8769 return true;
8770 }
8771
8772 bool LibraryCallKit::inline_character_compare(vmIntrinsics::ID id) {
8773 // argument(0) is receiver
8774 Node* codePoint = argument(1);
8775 Node* n = nullptr;
8776
8777 switch (id) {
8778 case vmIntrinsics::_isDigit :
8779 n = new DigitNode(control(), codePoint);
8780 break;
8781 case vmIntrinsics::_isLowerCase :
8782 n = new LowerCaseNode(control(), codePoint);
8783 break;
8784 case vmIntrinsics::_isUpperCase :
8785 n = new UpperCaseNode(control(), codePoint);
8786 break;
8787 case vmIntrinsics::_isWhitespace :
8788 n = new WhitespaceNode(control(), codePoint);
8789 break;
8790 default:
8791 fatal_unexpected_iid(id);
8792 }
8793
8794 set_result(_gvn.transform(n));
8795 return true;
8796 }
8797
8798 bool LibraryCallKit::inline_profileBoolean() {
8799 Node* counts = argument(1);
8800 const TypeAryPtr* ary = nullptr;
8801 ciArray* aobj = nullptr;
8802 if (counts->is_Con()
8803 && (ary = counts->bottom_type()->isa_aryptr()) != nullptr
8804 && (aobj = ary->const_oop()->as_array()) != nullptr
8805 && (aobj->length() == 2)) {
8806 // Profile is int[2] where [0] and [1] correspond to false and true value occurrences respectively.
8807 jint false_cnt = aobj->element_value(0).as_int();
8808 jint true_cnt = aobj->element_value(1).as_int();
8809
8810 if (C->log() != nullptr) {
8811 C->log()->elem("observe source='profileBoolean' false='%d' true='%d'",
8812 false_cnt, true_cnt);
8813 }
8814
8815 if (false_cnt + true_cnt == 0) {
8816 // According to profile, never executed.
8817 uncommon_trap_exact(Deoptimization::Reason_intrinsic,
8818 Deoptimization::Action_reinterpret);
8819 return true;
8820 }
8821
8822 // result is a boolean (0 or 1) and its profile (false_cnt & true_cnt)
8823 // is a number of each value occurrences.
8824 Node* result = argument(0);
8825 if (false_cnt == 0 || true_cnt == 0) {
8826 // According to profile, one value has been never seen.
8827 int expected_val = (false_cnt == 0) ? 1 : 0;
8828
8829 Node* cmp = _gvn.transform(new CmpINode(result, intcon(expected_val)));
8830 Node* test = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
8831
8832 IfNode* check = create_and_map_if(control(), test, PROB_ALWAYS, COUNT_UNKNOWN);
8833 Node* fast_path = _gvn.transform(new IfTrueNode(check));
8834 Node* slow_path = _gvn.transform(new IfFalseNode(check));
8835
8836 { // Slow path: uncommon trap for never seen value and then reexecute
8837 // MethodHandleImpl::profileBoolean() to bump the count, so JIT knows
8838 // the value has been seen at least once.
8839 PreserveJVMState pjvms(this);
8840 PreserveReexecuteState preexecs(this);
8841 jvms()->set_should_reexecute(true);
8842
8843 set_control(slow_path);
8844 set_i_o(i_o());
8845
8846 uncommon_trap_exact(Deoptimization::Reason_intrinsic,
8847 Deoptimization::Action_reinterpret);
8848 }
8849 // The guard for never seen value enables sharpening of the result and
8850 // returning a constant. It allows to eliminate branches on the same value
8851 // later on.
8852 set_control(fast_path);
8853 result = intcon(expected_val);
8854 }
8855 // Stop profiling.
8856 // MethodHandleImpl::profileBoolean() has profiling logic in its bytecode.
8857 // By replacing method body with profile data (represented as ProfileBooleanNode
8858 // on IR level) we effectively disable profiling.
8859 // It enables full speed execution once optimized code is generated.
8860 Node* profile = _gvn.transform(new ProfileBooleanNode(result, false_cnt, true_cnt));
8861 C->record_for_igvn(profile);
8862 set_result(profile);
8863 return true;
8864 } else {
8865 // Continue profiling.
8866 // Profile data isn't available at the moment. So, execute method's bytecode version.
8867 // Usually, when GWT LambdaForms are profiled it means that a stand-alone nmethod
8868 // is compiled and counters aren't available since corresponding MethodHandle
8869 // isn't a compile-time constant.
8870 return false;
8871 }
8872 }
8873
8874 bool LibraryCallKit::inline_isCompileConstant() {
8875 Node* n = argument(0);
8876 set_result(n->is_Con() ? intcon(1) : intcon(0));
8877 return true;
8878 }
8879
8880 //------------------------------- inline_getObjectSize --------------------------------------
8881 //
8882 // Calculate the runtime size of the object/array.
8883 // native long sun.instrument.InstrumentationImpl.getObjectSize0(long nativeAgent, Object objectToSize);
8884 //
8885 bool LibraryCallKit::inline_getObjectSize() {
8886 Node* obj = argument(3);
8887 Node* klass_node = load_object_klass(obj);
8888
8889 jint layout_con = Klass::_lh_neutral_value;
8890 Node* layout_val = get_layout_helper(klass_node, layout_con);
8891 int layout_is_con = (layout_val == nullptr);
8892
8893 if (layout_is_con) {
8894 // Layout helper is constant, can figure out things at compile time.
8895
8896 if (Klass::layout_helper_is_instance(layout_con)) {
8897 // Instance case: layout_con contains the size itself.
8898 Node *size = longcon(Klass::layout_helper_size_in_bytes(layout_con));
8899 set_result(size);
8900 } else {
8901 // Array case: size is round(header + element_size*arraylength).
8902 // Since arraylength is different for every array instance, we have to
8903 // compute the whole thing at runtime.
8904
8905 Node* arr_length = load_array_length(obj);
8906
8907 int round_mask = MinObjAlignmentInBytes - 1;
8908 int hsize = Klass::layout_helper_header_size(layout_con);
8909 int eshift = Klass::layout_helper_log2_element_size(layout_con);
8910
8911 if ((round_mask & ~right_n_bits(eshift)) == 0) {
8912 round_mask = 0; // strength-reduce it if it goes away completely
8913 }
8914 assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
8915 Node* header_size = intcon(hsize + round_mask);
8916
8917 Node* lengthx = ConvI2X(arr_length);
8918 Node* headerx = ConvI2X(header_size);
8919
8920 Node* abody = lengthx;
8921 if (eshift != 0) {
8922 abody = _gvn.transform(new LShiftXNode(lengthx, intcon(eshift)));
8923 }
8924 Node* size = _gvn.transform( new AddXNode(headerx, abody) );
8925 if (round_mask != 0) {
8926 size = _gvn.transform( new AndXNode(size, MakeConX(~round_mask)) );
8927 }
8928 size = ConvX2L(size);
8929 set_result(size);
8930 }
8931 } else {
8932 // Layout helper is not constant, need to test for array-ness at runtime.
8933
8934 enum { _instance_path = 1, _array_path, PATH_LIMIT };
8935 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
8936 PhiNode* result_val = new PhiNode(result_reg, TypeLong::LONG);
8937 record_for_igvn(result_reg);
8938
8939 Node* array_ctl = generate_array_guard(klass_node, nullptr, &obj);
8940 if (array_ctl != nullptr) {
8941 // Array case: size is round(header + element_size*arraylength).
8942 // Since arraylength is different for every array instance, we have to
8943 // compute the whole thing at runtime.
8944
8945 PreserveJVMState pjvms(this);
8946 set_control(array_ctl);
8947 Node* arr_length = load_array_length(obj);
8948
8949 int round_mask = MinObjAlignmentInBytes - 1;
8950 Node* mask = intcon(round_mask);
8951
8952 Node* hss = intcon(Klass::_lh_header_size_shift);
8953 Node* hsm = intcon(Klass::_lh_header_size_mask);
8954 Node* header_size = _gvn.transform(new URShiftINode(layout_val, hss));
8955 header_size = _gvn.transform(new AndINode(header_size, hsm));
8956 header_size = _gvn.transform(new AddINode(header_size, mask));
8957
8958 // There is no need to mask or shift this value.
8959 // The semantics of LShiftINode include an implicit mask to 0x1F.
8960 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
8961 Node* elem_shift = layout_val;
8962
8963 Node* lengthx = ConvI2X(arr_length);
8964 Node* headerx = ConvI2X(header_size);
8965
8966 Node* abody = _gvn.transform(new LShiftXNode(lengthx, elem_shift));
8967 Node* size = _gvn.transform(new AddXNode(headerx, abody));
8968 if (round_mask != 0) {
8969 size = _gvn.transform(new AndXNode(size, MakeConX(~round_mask)));
8970 }
8971 size = ConvX2L(size);
8972
8973 result_reg->init_req(_array_path, control());
8974 result_val->init_req(_array_path, size);
8975 }
8976
8977 if (!stopped()) {
8978 // Instance case: the layout helper gives us instance size almost directly,
8979 // but we need to mask out the _lh_instance_slow_path_bit.
8980 Node* size = ConvI2X(layout_val);
8981 assert((int) Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
8982 Node* mask = MakeConX(~(intptr_t) right_n_bits(LogBytesPerLong));
8983 size = _gvn.transform(new AndXNode(size, mask));
8984 size = ConvX2L(size);
8985
8986 result_reg->init_req(_instance_path, control());
8987 result_val->init_req(_instance_path, size);
8988 }
8989
8990 set_result(result_reg, result_val);
8991 }
8992
8993 return true;
8994 }
8995
8996 //------------------------------- inline_blackhole --------------------------------------
8997 //
8998 // Make sure all arguments to this node are alive.
8999 // This matches methods that were requested to be blackholed through compile commands.
9000 //
9001 bool LibraryCallKit::inline_blackhole() {
9002 assert(callee()->is_static(), "Should have been checked before: only static methods here");
9003 assert(callee()->is_empty(), "Should have been checked before: only empty methods here");
9004 assert(callee()->holder()->is_loaded(), "Should have been checked before: only methods for loaded classes here");
9005
9006 // Blackhole node pinches only the control, not memory. This allows
9007 // the blackhole to be pinned in the loop that computes blackholed
9008 // values, but have no other side effects, like breaking the optimizations
9009 // across the blackhole.
9010
9011 Node* bh = _gvn.transform(new BlackholeNode(control()));
9012 set_control(_gvn.transform(new ProjNode(bh, TypeFunc::Control)));
9013
9014 // Bind call arguments as blackhole arguments to keep them alive
9015 uint nargs = callee()->arg_size();
9016 for (uint i = 0; i < nargs; i++) {
9017 bh->add_req(argument(i));
9018 }
9019
9020 return true;
9021 }
9022
9023 Node* LibraryCallKit::unbox_fp16_value(const TypeInstPtr* float16_box_type, ciField* field, Node* box) {
9024 const TypeInstPtr* box_type = _gvn.type(box)->isa_instptr();
9025 if (box_type == nullptr || box_type->instance_klass() != float16_box_type->instance_klass()) {
9026 return nullptr; // box klass is not Float16
9027 }
9028
9029 // Null check; get notnull casted pointer
9030 Node* null_ctl = top();
9031 Node* not_null_box = null_check_oop(box, &null_ctl, true);
9032 // If not_null_box is dead, only null-path is taken
9033 if (stopped()) {
9034 set_control(null_ctl);
9035 return nullptr;
9036 }
9037 assert(not_null_box->bottom_type()->is_instptr()->maybe_null() == false, "");
9038 const TypePtr* adr_type = C->alias_type(field)->adr_type();
9039 Node* adr = basic_plus_adr(not_null_box, field->offset_in_bytes());
9040 return access_load_at(not_null_box, adr, adr_type, TypeInt::SHORT, T_SHORT, IN_HEAP);
9041 }
9042
9043 Node* LibraryCallKit::box_fp16_value(const TypeInstPtr* float16_box_type, ciField* field, Node* value) {
9044 PreserveReexecuteState preexecs(this);
9045 jvms()->set_should_reexecute(true);
9046
9047 const TypeKlassPtr* klass_type = float16_box_type->as_klass_type();
9048 Node* klass_node = makecon(klass_type);
9049 Node* box = new_instance(klass_node);
9050
9051 Node* value_field = basic_plus_adr(box, field->offset_in_bytes());
9052 const TypePtr* value_adr_type = value_field->bottom_type()->is_ptr();
9053
9054 Node* field_store = _gvn.transform(access_store_at(box,
9055 value_field,
9056 value_adr_type,
9057 value,
9058 TypeInt::SHORT,
9059 T_SHORT,
9060 IN_HEAP));
9061 set_memory(field_store, value_adr_type);
9062 return box;
9063 }
9064
9065 bool LibraryCallKit::inline_fp16_operations(vmIntrinsics::ID id, int num_args) {
9066 if (!Matcher::match_rule_supported(Op_ReinterpretS2HF) ||
9067 !Matcher::match_rule_supported(Op_ReinterpretHF2S)) {
9068 return false;
9069 }
9070
9071 const TypeInstPtr* box_type = _gvn.type(argument(0))->isa_instptr();
9072 if (box_type == nullptr || box_type->const_oop() == nullptr) {
9073 return false;
9074 }
9075
9076 ciInstanceKlass* float16_klass = box_type->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
9077 const TypeInstPtr* float16_box_type = TypeInstPtr::make_exact(TypePtr::NotNull, float16_klass);
9078 ciField* field = float16_klass->get_field_by_name(ciSymbols::value_name(),
9079 ciSymbols::short_signature(),
9080 false);
9081 assert(field != nullptr, "");
9082
9083 // Transformed nodes
9084 Node* fld1 = nullptr;
9085 Node* fld2 = nullptr;
9086 Node* fld3 = nullptr;
9087 switch(num_args) {
9088 case 3:
9089 fld3 = unbox_fp16_value(float16_box_type, field, argument(3));
9090 if (fld3 == nullptr) {
9091 return false;
9092 }
9093 fld3 = _gvn.transform(new ReinterpretS2HFNode(fld3));
9094 // fall-through
9095 case 2:
9096 fld2 = unbox_fp16_value(float16_box_type, field, argument(2));
9097 if (fld2 == nullptr) {
9098 return false;
9099 }
9100 fld2 = _gvn.transform(new ReinterpretS2HFNode(fld2));
9101 // fall-through
9102 case 1:
9103 fld1 = unbox_fp16_value(float16_box_type, field, argument(1));
9104 if (fld1 == nullptr) {
9105 return false;
9106 }
9107 fld1 = _gvn.transform(new ReinterpretS2HFNode(fld1));
9108 break;
9109 default: fatal("Unsupported number of arguments %d", num_args);
9110 }
9111
9112 Node* result = nullptr;
9113 switch (id) {
9114 // Unary operations
9115 case vmIntrinsics::_sqrt_float16:
9116 result = _gvn.transform(new SqrtHFNode(C, control(), fld1));
9117 break;
9118 // Ternary operations
9119 case vmIntrinsics::_fma_float16:
9120 result = _gvn.transform(new FmaHFNode(fld1, fld2, fld3));
9121 break;
9122 default:
9123 fatal_unexpected_iid(id);
9124 break;
9125 }
9126 result = _gvn.transform(new ReinterpretHF2SNode(result));
9127 set_result(box_fp16_value(float16_box_type, field, result));
9128 return true;
9129 }
9130