/*
 * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "ci/ciSymbols.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "classfile/vmIntrinsics.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/countbitsnode.hpp"
#include "opto/idealKit.hpp"
#include "opto/library_call.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/vectornode.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/unsafe.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mountUnmountDisabler.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

//---------------------------make_vm_intrinsic----------------------------
CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  vmIntrinsicID id = m->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (!m->is_loaded()) {
    // Do not attempt to inline unloaded methods.
    return nullptr;
  }

  C2Compiler* compiler = (C2Compiler*)CompilerThread::current()->compiler();
  bool is_available = false;

  {
    // For calling is_intrinsic_available() the compiler must transition to
    // the '_thread_in_vm' state, because the availability check reads
    // VM-internal data (the target Method* and the compiler directives).
    VM_ENTRY_MARK;
    methodHandle mh(THREAD, m->get_Method());
    is_available = compiler != nullptr && compiler->is_intrinsic_available(mh, C->directive());
    if (is_available && is_virtual) {
      is_available = vmIntrinsics::does_virtual_dispatch(id);
    }
  }

  if (is_available) {
    assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
    assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
    return new LibraryIntrinsic(m, is_virtual,
                                vmIntrinsics::predicates_needed(id),
                                vmIntrinsics::does_virtual_dispatch(id),
                                id);
  } else {
    return nullptr;
  }
}

JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
#ifndef PRODUCT
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci = kit.bci();
#ifdef ASSERT
  Node* ctrl = kit.control();
#endif
  // Try to inline the intrinsic.
  if (callee->check_intrinsic_candidate() &&
      kit.try_to_inline(_last_predicate)) {
    const char *inline_msg = is_virtual() ? "(intrinsic, virtual)"
                                          : "(intrinsic)";
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, InliningResult::SUCCESS, inline_msg);
    C->inline_printer()->record(callee, jvms, InliningResult::SUCCESS, inline_msg);
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    // Push the result from the inlined method onto the stack.
    kit.push_result();
    return kit.transfer_exceptions_into_jvms();
  }

  // The intrinsic bailed out.
  assert(ctrl == kit.control(), "Control flow was added although the intrinsic bailed out");
  assert(jvms->map() == kit.map(), "Out of sync JVM state");
  if (jvms->has_method()) {
    // Not a root compile.
    const char* msg;
    if (callee->intrinsic_candidate()) {
      msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
    } else {
      msg = is_virtual() ? "failed to inline (intrinsic, virtual), method not annotated"
                         : "failed to inline (intrinsic), method not annotated";
    }
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, InliningResult::FAILURE, msg);
    C->inline_printer()->record(callee, jvms, InliningResult::FAILURE, msg);
  } else {
    // Root compile.
    ResourceMark rm;
    stringStream msg_stream;
    msg_stream.print("Did not generate intrinsic %s%s at bci:%d in",
                     vmIntrinsics::name_at(intrinsic_id()),
                     is_virtual() ? " (virtual)" : "", bci);
    const char *msg = msg_stream.freeze();
    log_debug(jit, inlining)("%s", msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      tty->print("%s", msg);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);

  return nullptr;
}

Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
  _last_predicate = predicate;
#ifndef PRODUCT
  assert(is_predicated() && predicate < predicates_count(), "sanity");
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Predicate for intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci = kit.bci();

  Node* slow_ctl = kit.try_to_predicate(predicate);
  if (!kit.failing()) {
    const char *inline_msg = is_virtual() ? "(intrinsic, virtual, predicate)"
                                          : "(intrinsic, predicate)";
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, InliningResult::SUCCESS, inline_msg);
    C->inline_printer()->record(callee, jvms, InliningResult::SUCCESS, inline_msg);

    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    return slow_ctl; // Could be null if the check folds.
  }

  // The intrinsic bailed out.
  if (jvms->has_method()) {
    // Not a root compile.
    const char* msg = "failed to generate predicate for intrinsic";
    CompileTask::print_inlining_ul(kit.callee(), jvms->depth() - 1, bci, InliningResult::FAILURE, msg);
    C->inline_printer()->record(kit.callee(), jvms, InliningResult::FAILURE, msg);
  } else {
    // Root compile.
    ResourceMark rm;
    stringStream msg_stream;
    msg_stream.print("Did not generate intrinsic %s%s at bci:%d in",
                     vmIntrinsics::name_at(intrinsic_id()),
                     is_virtual() ? " (virtual)" : "", bci);
    const char *msg = msg_stream.freeze();
    log_debug(jit, inlining)("%s", msg);
    C->inline_printer()->record(kit.callee(), jvms, InliningResult::FAILURE, msg);
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  return nullptr;
}

bool LibraryCallKit::try_to_inline(int predicate) {
  // Handle symbolic names for otherwise undistinguished boolean switches:
  const bool is_store = true;
  const bool is_compress = true;
  const bool is_static = true;
  const bool is_volatile = true;

  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node.
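    // (reset_memory() detaches and returns the bare memory edge, and
    // set_all_memory() rewraps it in a MergeMem so that the loads and
    // stores emitted by the intrinsic below can each use their own
    // alias-category memory slice.)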
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  switch (intrinsic_id()) {
  case vmIntrinsics::_hashCode: return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
  case vmIntrinsics::_identityHashCode: return inline_native_hashcode(/*!virtual*/ false, is_static);
  case vmIntrinsics::_getClass: return inline_native_getClass();

  case vmIntrinsics::_ceil:
  case vmIntrinsics::_floor:
  case vmIntrinsics::_rint:
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dsinh:
  case vmIntrinsics::_dtanh:
  case vmIntrinsics::_dcbrt:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_fabs:
  case vmIntrinsics::_iabs:
  case vmIntrinsics::_labs:
  case vmIntrinsics::_datan2:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dsqrt_strict:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dpow:
  case vmIntrinsics::_dcopySign:
  case vmIntrinsics::_fcopySign:
  case vmIntrinsics::_dsignum:
  case vmIntrinsics::_roundF:
  case vmIntrinsics::_roundD:
  case vmIntrinsics::_fsignum: return inline_math_native(intrinsic_id());

  case vmIntrinsics::_notify:
  case vmIntrinsics::_notifyAll:
    return inline_notify(intrinsic_id());

  case vmIntrinsics::_addExactI: return inline_math_addExactI(false /* add */);
  case vmIntrinsics::_addExactL: return inline_math_addExactL(false /* add */);
  case vmIntrinsics::_decrementExactI: return inline_math_subtractExactI(true /* decrement */);
  case vmIntrinsics::_decrementExactL: return inline_math_subtractExactL(true /* decrement */);
  case vmIntrinsics::_incrementExactI: return inline_math_addExactI(true /* increment */);
  case vmIntrinsics::_incrementExactL: return inline_math_addExactL(true /* increment */);
  case vmIntrinsics::_multiplyExactI: return inline_math_multiplyExactI();
  case vmIntrinsics::_multiplyExactL: return inline_math_multiplyExactL();
  case vmIntrinsics::_multiplyHigh: return inline_math_multiplyHigh();
  case vmIntrinsics::_unsignedMultiplyHigh: return inline_math_unsignedMultiplyHigh();
  case vmIntrinsics::_negateExactI: return inline_math_negateExactI();
  case vmIntrinsics::_negateExactL: return inline_math_negateExactL();
  case vmIntrinsics::_subtractExactI: return inline_math_subtractExactI(false /* subtract */);
  case vmIntrinsics::_subtractExactL: return inline_math_subtractExactL(false /* subtract */);

  case vmIntrinsics::_arraycopy: return inline_arraycopy();

  case vmIntrinsics::_arraySort: return inline_array_sort();
  case vmIntrinsics::_arrayPartition: return inline_array_partition();

  case vmIntrinsics::_compareToL: return inline_string_compareTo(StrIntrinsicNode::LL);
  case vmIntrinsics::_compareToU: return inline_string_compareTo(StrIntrinsicNode::UU);
  case vmIntrinsics::_compareToLU: return inline_string_compareTo(StrIntrinsicNode::LU);
  case vmIntrinsics::_compareToUL: return inline_string_compareTo(StrIntrinsicNode::UL);

  case vmIntrinsics::_indexOfL: return inline_string_indexOf(StrIntrinsicNode::LL);
  case vmIntrinsics::_indexOfU: return inline_string_indexOf(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfUL: return inline_string_indexOf(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfIL: return inline_string_indexOfI(StrIntrinsicNode::LL);
  case vmIntrinsics::_indexOfIU: return inline_string_indexOfI(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfIUL: return inline_string_indexOfI(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfU_char: return inline_string_indexOfChar(StrIntrinsicNode::U);
  case vmIntrinsics::_indexOfL_char: return inline_string_indexOfChar(StrIntrinsicNode::L);

  case vmIntrinsics::_equalsL: return inline_string_equals(StrIntrinsicNode::LL);

  case vmIntrinsics::_vectorizedHashCode: return inline_vectorizedHashCode();

  case vmIntrinsics::_toBytesStringU: return inline_string_toBytesU();
  case vmIntrinsics::_getCharsStringU: return inline_string_getCharsU();
  case vmIntrinsics::_getCharStringU: return inline_string_char_access(!is_store);
  case vmIntrinsics::_putCharStringU: return inline_string_char_access( is_store);

  case vmIntrinsics::_compressStringC:
  case vmIntrinsics::_compressStringB: return inline_string_copy( is_compress);
  case vmIntrinsics::_inflateStringC:
  case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);

  case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false);
  case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
  case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false);
  case vmIntrinsics::_getShort: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, false);
  case vmIntrinsics::_getChar: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, false);
  case vmIntrinsics::_getInt: return inline_unsafe_access(!is_store, T_INT, Relaxed, false);
  case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false);
  case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false);
  case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);

  case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false);
  case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
  case vmIntrinsics::_putByte: return inline_unsafe_access( is_store, T_BYTE, Relaxed, false);
  case vmIntrinsics::_putShort: return inline_unsafe_access( is_store, T_SHORT, Relaxed, false);
  case vmIntrinsics::_putChar: return inline_unsafe_access( is_store, T_CHAR, Relaxed, false);
  case vmIntrinsics::_putInt: return inline_unsafe_access( is_store, T_INT, Relaxed, false);
  case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false);
  case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false);
  case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false);

  case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
  case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
  case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
  case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
  case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_store, T_CHAR, Volatile, false);
  case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_store, T_INT, Volatile, false);
  case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_store, T_LONG, Volatile, false);
  case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_store, T_FLOAT, Volatile, false);
  case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_store, T_DOUBLE, Volatile, false);

  case vmIntrinsics::_putReferenceVolatile: return inline_unsafe_access( is_store, T_OBJECT, Volatile, false);
  case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
  case vmIntrinsics::_putByteVolatile: return inline_unsafe_access( is_store, T_BYTE, Volatile, false);
  case vmIntrinsics::_putShortVolatile: return inline_unsafe_access( is_store, T_SHORT, Volatile, false);
  case vmIntrinsics::_putCharVolatile: return inline_unsafe_access( is_store, T_CHAR, Volatile, false);
  case vmIntrinsics::_putIntVolatile: return inline_unsafe_access( is_store, T_INT, Volatile, false);
  case vmIntrinsics::_putLongVolatile: return inline_unsafe_access( is_store, T_LONG, Volatile, false);
  case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access( is_store, T_FLOAT, Volatile, false);
  case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access( is_store, T_DOUBLE, Volatile, false);

  case vmIntrinsics::_getShortUnaligned: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, true);
  case vmIntrinsics::_getCharUnaligned: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, true);
  case vmIntrinsics::_getIntUnaligned: return inline_unsafe_access(!is_store, T_INT, Relaxed, true);
  case vmIntrinsics::_getLongUnaligned: return inline_unsafe_access(!is_store, T_LONG, Relaxed, true);

  case vmIntrinsics::_putShortUnaligned: return inline_unsafe_access( is_store, T_SHORT, Relaxed, true);
  case vmIntrinsics::_putCharUnaligned: return inline_unsafe_access( is_store, T_CHAR, Relaxed, true);
  case vmIntrinsics::_putIntUnaligned: return inline_unsafe_access( is_store, T_INT, Relaxed, true);
  case vmIntrinsics::_putLongUnaligned: return inline_unsafe_access( is_store, T_LONG, Relaxed, true);

  case vmIntrinsics::_getReferenceAcquire: return inline_unsafe_access(!is_store, T_OBJECT, Acquire, false);
  case vmIntrinsics::_getBooleanAcquire: return inline_unsafe_access(!is_store, T_BOOLEAN, Acquire, false);
  case vmIntrinsics::_getByteAcquire: return inline_unsafe_access(!is_store, T_BYTE, Acquire, false);
  case vmIntrinsics::_getShortAcquire: return inline_unsafe_access(!is_store, T_SHORT, Acquire, false);
  case vmIntrinsics::_getCharAcquire: return inline_unsafe_access(!is_store, T_CHAR, Acquire, false);
  case vmIntrinsics::_getIntAcquire: return inline_unsafe_access(!is_store, T_INT, Acquire, false);
  case vmIntrinsics::_getLongAcquire: return inline_unsafe_access(!is_store, T_LONG, Acquire, false);
  case vmIntrinsics::_getFloatAcquire: return inline_unsafe_access(!is_store, T_FLOAT, Acquire, false);
  case vmIntrinsics::_getDoubleAcquire: return inline_unsafe_access(!is_store, T_DOUBLE, Acquire, false);

  case vmIntrinsics::_putReferenceRelease: return inline_unsafe_access( is_store, T_OBJECT, Release, false);
  case vmIntrinsics::_putBooleanRelease: return inline_unsafe_access( is_store, T_BOOLEAN, Release, false);
  case vmIntrinsics::_putByteRelease: return inline_unsafe_access( is_store, T_BYTE, Release, false);
  case vmIntrinsics::_putShortRelease: return inline_unsafe_access( is_store, T_SHORT, Release, false);
  case vmIntrinsics::_putCharRelease: return inline_unsafe_access( is_store, T_CHAR, Release, false);
  case vmIntrinsics::_putIntRelease: return inline_unsafe_access( is_store, T_INT, Release, false);
  case vmIntrinsics::_putLongRelease: return inline_unsafe_access( is_store, T_LONG, Release, false);
  case vmIntrinsics::_putFloatRelease: return inline_unsafe_access( is_store, T_FLOAT, Release, false);
  case vmIntrinsics::_putDoubleRelease: return inline_unsafe_access( is_store, T_DOUBLE, Release, false);

  case vmIntrinsics::_getReferenceOpaque: return inline_unsafe_access(!is_store, T_OBJECT, Opaque, false);
  case vmIntrinsics::_getBooleanOpaque: return inline_unsafe_access(!is_store, T_BOOLEAN, Opaque, false);
  case vmIntrinsics::_getByteOpaque: return inline_unsafe_access(!is_store, T_BYTE, Opaque, false);
  case vmIntrinsics::_getShortOpaque: return inline_unsafe_access(!is_store, T_SHORT, Opaque, false);
  case vmIntrinsics::_getCharOpaque: return inline_unsafe_access(!is_store, T_CHAR, Opaque, false);
  case vmIntrinsics::_getIntOpaque: return inline_unsafe_access(!is_store, T_INT, Opaque, false);
  case vmIntrinsics::_getLongOpaque: return inline_unsafe_access(!is_store, T_LONG, Opaque, false);
  case vmIntrinsics::_getFloatOpaque: return inline_unsafe_access(!is_store, T_FLOAT, Opaque, false);
  case vmIntrinsics::_getDoubleOpaque: return inline_unsafe_access(!is_store, T_DOUBLE, Opaque, false);

  case vmIntrinsics::_putReferenceOpaque: return inline_unsafe_access( is_store, T_OBJECT, Opaque, false);
  case vmIntrinsics::_putBooleanOpaque: return inline_unsafe_access( is_store, T_BOOLEAN, Opaque, false);
  case vmIntrinsics::_putByteOpaque: return inline_unsafe_access( is_store, T_BYTE, Opaque, false);
  case vmIntrinsics::_putShortOpaque: return inline_unsafe_access( is_store, T_SHORT, Opaque, false);
  case vmIntrinsics::_putCharOpaque: return inline_unsafe_access( is_store, T_CHAR, Opaque, false);
  case vmIntrinsics::_putIntOpaque: return inline_unsafe_access( is_store, T_INT, Opaque, false);
  case vmIntrinsics::_putLongOpaque: return inline_unsafe_access( is_store, T_LONG, Opaque, false);
  case vmIntrinsics::_putFloatOpaque: return inline_unsafe_access( is_store, T_FLOAT, Opaque, false);
  case vmIntrinsics::_putDoubleOpaque: return inline_unsafe_access( is_store, T_DOUBLE, Opaque, false);

  case vmIntrinsics::_compareAndSetReference: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap, Volatile);
  case vmIntrinsics::_compareAndSetByte: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap, Volatile);
  case vmIntrinsics::_compareAndSetShort: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap, Volatile);
  case vmIntrinsics::_compareAndSetInt: return inline_unsafe_load_store(T_INT, LS_cmp_swap, Volatile);
  case vmIntrinsics::_compareAndSetLong: return inline_unsafe_load_store(T_LONG, LS_cmp_swap, Volatile);

  case vmIntrinsics::_weakCompareAndSetReferencePlain: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetReferenceAcquire: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetReferenceRelease: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetReference: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetBytePlain: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetByteAcquire: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetByteRelease: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetByte: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetShortPlain: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetShortAcquire: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetShortRelease: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetShort: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetIntPlain: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetIntAcquire: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetIntRelease: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetInt: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetLongPlain: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetLongAcquire: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetLongRelease: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetLong: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Volatile);

  case vmIntrinsics::_compareAndExchangeReference: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeReferenceAcquire: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeReferenceRelease: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Release);
  case vmIntrinsics::_compareAndExchangeByte: return inline_unsafe_load_store(T_BYTE, LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeByteAcquire: return inline_unsafe_load_store(T_BYTE, LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeByteRelease: return inline_unsafe_load_store(T_BYTE, LS_cmp_exchange, Release);
  case vmIntrinsics::_compareAndExchangeShort: return inline_unsafe_load_store(T_SHORT, LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeShortAcquire: return inline_unsafe_load_store(T_SHORT, LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeShortRelease: return inline_unsafe_load_store(T_SHORT, LS_cmp_exchange, Release);
  case vmIntrinsics::_compareAndExchangeInt: return inline_unsafe_load_store(T_INT, LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeIntAcquire: return inline_unsafe_load_store(T_INT, LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeIntRelease: return inline_unsafe_load_store(T_INT, LS_cmp_exchange, Release);
  case vmIntrinsics::_compareAndExchangeLong: return inline_unsafe_load_store(T_LONG, LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeLongAcquire: return inline_unsafe_load_store(T_LONG, LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeLongRelease: return inline_unsafe_load_store(T_LONG, LS_cmp_exchange, Release);

  case vmIntrinsics::_getAndAddByte: return inline_unsafe_load_store(T_BYTE, LS_get_add, Volatile);
  case vmIntrinsics::_getAndAddShort: return inline_unsafe_load_store(T_SHORT, LS_get_add, Volatile);
  case vmIntrinsics::_getAndAddInt: return inline_unsafe_load_store(T_INT, LS_get_add, Volatile);
  case vmIntrinsics::_getAndAddLong: return inline_unsafe_load_store(T_LONG, LS_get_add, Volatile);

  case vmIntrinsics::_getAndSetByte: return inline_unsafe_load_store(T_BYTE, LS_get_set, Volatile);
  case vmIntrinsics::_getAndSetShort: return inline_unsafe_load_store(T_SHORT, LS_get_set, Volatile);
  case vmIntrinsics::_getAndSetInt: return inline_unsafe_load_store(T_INT, LS_get_set, Volatile);
  case vmIntrinsics::_getAndSetLong: return inline_unsafe_load_store(T_LONG, LS_get_set, Volatile);
  case vmIntrinsics::_getAndSetReference: return inline_unsafe_load_store(T_OBJECT, LS_get_set, Volatile);

  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_storeStoreFence:
  case vmIntrinsics::_fullFence: return inline_unsafe_fence(intrinsic_id());

  case vmIntrinsics::_onSpinWait: return inline_onspinwait();

  case vmIntrinsics::_currentCarrierThread: return inline_native_currentCarrierThread();
  case vmIntrinsics::_currentThread: return inline_native_currentThread();
  case vmIntrinsics::_setCurrentThread: return inline_native_setCurrentThread();

  case vmIntrinsics::_scopedValueCache: return inline_native_scopedValueCache();
  case vmIntrinsics::_setScopedValueCache: return inline_native_setScopedValueCache();

  case vmIntrinsics::_Continuation_pin: return inline_native_Continuation_pinning(false);
  case vmIntrinsics::_Continuation_unpin: return inline_native_Continuation_pinning(true);

  case vmIntrinsics::_vthreadEndFirstTransition: return inline_native_vthread_end_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_end_first_transition_Java()),
                                                                                             "endFirstTransition", true);
  case vmIntrinsics::_vthreadStartFinalTransition: return inline_native_vthread_start_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_start_final_transition_Java()),
                                                                                                 "startFinalTransition", true);
  case vmIntrinsics::_vthreadStartTransition: return inline_native_vthread_start_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_start_transition_Java()),
                                                                                            "startTransition", false);
  case vmIntrinsics::_vthreadEndTransition: return inline_native_vthread_end_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_end_transition_Java()),
                                                                                        "endTransition", false);
#if INCLUDE_JVMTI
  case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
#endif

#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
  case vmIntrinsics::_getEventWriter: return inline_native_getEventWriter();
  case vmIntrinsics::_jvm_commit: return inline_native_jvm_commit();
#endif
  case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
  case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
  case vmIntrinsics::_writeback0: return inline_unsafe_writeback0();
  case vmIntrinsics::_writebackPreSync0: return inline_unsafe_writebackSync0(true);
  case vmIntrinsics::_writebackPostSync0: return inline_unsafe_writebackSync0(false);
  case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
  case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
  case vmIntrinsics::_setMemory: return inline_unsafe_setMemory();
  case vmIntrinsics::_getLength: return inline_native_getLength();
  case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
  case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
  case vmIntrinsics::_equalsB: return inline_array_equals(StrIntrinsicNode::LL);
  case vmIntrinsics::_equalsC: return inline_array_equals(StrIntrinsicNode::UU);
  case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
  case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
  case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());

  case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
  case vmIntrinsics::_newArray: return inline_unsafe_newArray(false);

  case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();

  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_isHidden:
  case vmIntrinsics::_getSuperclass: return inline_native_Class_query(intrinsic_id());

  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_floatToIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_doubleToLongBits:
  case vmIntrinsics::_longBitsToDouble:
  case vmIntrinsics::_floatToFloat16:
  case vmIntrinsics::_float16ToFloat: return inline_fp_conversions(intrinsic_id());
  case vmIntrinsics::_sqrt_float16: return inline_fp16_operations(intrinsic_id(), 1);
  case vmIntrinsics::_fma_float16: return inline_fp16_operations(intrinsic_id(), 3);
  case vmIntrinsics::_floatIsFinite:
  case vmIntrinsics::_floatIsInfinite:
  case vmIntrinsics::_doubleIsFinite:
  case vmIntrinsics::_doubleIsInfinite: return inline_fp_range_check(intrinsic_id());

  case vmIntrinsics::_numberOfLeadingZeros_i:
  case vmIntrinsics::_numberOfLeadingZeros_l:
  case vmIntrinsics::_numberOfTrailingZeros_i:
  case vmIntrinsics::_numberOfTrailingZeros_l:
  case vmIntrinsics::_bitCount_i:
  case vmIntrinsics::_bitCount_l:
  case vmIntrinsics::_reverse_i:
  case vmIntrinsics::_reverse_l:
  case vmIntrinsics::_reverseBytes_i:
  case vmIntrinsics::_reverseBytes_l:
  case vmIntrinsics::_reverseBytes_s:
  case vmIntrinsics::_reverseBytes_c: return inline_number_methods(intrinsic_id());

  case vmIntrinsics::_compress_i:
  case vmIntrinsics::_compress_l:
  case vmIntrinsics::_expand_i:
  case vmIntrinsics::_expand_l: return inline_bitshuffle_methods(intrinsic_id());

  case vmIntrinsics::_compareUnsigned_i:
  case vmIntrinsics::_compareUnsigned_l: return inline_compare_unsigned(intrinsic_id());

  case vmIntrinsics::_divideUnsigned_i:
  case vmIntrinsics::_divideUnsigned_l:
  case vmIntrinsics::_remainderUnsigned_i:
  case vmIntrinsics::_remainderUnsigned_l: return inline_divmod_methods(intrinsic_id());

  case vmIntrinsics::_getCallerClass: return inline_native_Reflection_getCallerClass();

  case vmIntrinsics::_Reference_get0: return inline_reference_get0();
  case vmIntrinsics::_Reference_refersTo0: return inline_reference_refersTo0(false);
  case vmIntrinsics::_PhantomReference_refersTo0: return inline_reference_refersTo0(true);
  case vmIntrinsics::_Reference_clear0: return inline_reference_clear0(false);
  case vmIntrinsics::_PhantomReference_clear0: return inline_reference_clear0(true);

  case vmIntrinsics::_Class_cast: return inline_Class_cast();

  case vmIntrinsics::_aescrypt_encryptBlock:
  case vmIntrinsics::_aescrypt_decryptBlock: return inline_aescrypt_Block(intrinsic_id());

  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt(intrinsic_id());

  case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
  case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
    return inline_electronicCodeBook_AESCrypt(intrinsic_id());

  case vmIntrinsics::_counterMode_AESCrypt:
    return inline_counterMode_AESCrypt(intrinsic_id());

  case vmIntrinsics::_galoisCounterMode_AESCrypt:
    return inline_galoisCounterMode_AESCrypt();

  case vmIntrinsics::_md5_implCompress:
  case vmIntrinsics::_sha_implCompress:
  case vmIntrinsics::_sha2_implCompress:
  case vmIntrinsics::_sha5_implCompress:
  case vmIntrinsics::_sha3_implCompress:
    return inline_digestBase_implCompress(intrinsic_id());
  case vmIntrinsics::_double_keccak:
    return inline_double_keccak();

  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB(predicate);

  case vmIntrinsics::_multiplyToLen:
    return inline_multiplyToLen();

  case vmIntrinsics::_squareToLen:
    return inline_squareToLen();

  case vmIntrinsics::_mulAdd:
    return inline_mulAdd();

  case vmIntrinsics::_montgomeryMultiply:
    return inline_montgomeryMultiply();
  case vmIntrinsics::_montgomerySquare:
    return inline_montgomerySquare();

  case vmIntrinsics::_bigIntegerRightShiftWorker:
    return inline_bigIntegerShift(true);
  case vmIntrinsics::_bigIntegerLeftShiftWorker:
    return inline_bigIntegerShift(false);

  case vmIntrinsics::_vectorizedMismatch:
    return inline_vectorizedMismatch();

  case vmIntrinsics::_ghash_processBlocks:
    return inline_ghash_processBlocks();
  case vmIntrinsics::_chacha20Block:
    return inline_chacha20Block();
  case vmIntrinsics::_kyberNtt:
    return inline_kyberNtt();
  case vmIntrinsics::_kyberInverseNtt:
    return inline_kyberInverseNtt();
  case vmIntrinsics::_kyberNttMult:
    return inline_kyberNttMult();
  case vmIntrinsics::_kyberAddPoly_2:
    return inline_kyberAddPoly_2();
  case vmIntrinsics::_kyberAddPoly_3:
    return inline_kyberAddPoly_3();
  case vmIntrinsics::_kyber12To16:
    return inline_kyber12To16();
  case vmIntrinsics::_kyberBarrettReduce:
    return inline_kyberBarrettReduce();
  case vmIntrinsics::_dilithiumAlmostNtt:
    return inline_dilithiumAlmostNtt();
  case vmIntrinsics::_dilithiumAlmostInverseNtt:
    return inline_dilithiumAlmostInverseNtt();
  case vmIntrinsics::_dilithiumNttMult:
    return inline_dilithiumNttMult();
  case vmIntrinsics::_dilithiumMontMulByConstant:
    return inline_dilithiumMontMulByConstant();
  case vmIntrinsics::_dilithiumDecomposePoly:
    return inline_dilithiumDecomposePoly();
  case vmIntrinsics::_base64_encodeBlock:
    return inline_base64_encodeBlock();
  case vmIntrinsics::_base64_decodeBlock:
    return inline_base64_decodeBlock();
  case vmIntrinsics::_poly1305_processBlocks:
    return inline_poly1305_processBlocks();
  case vmIntrinsics::_intpoly_montgomeryMult_P256:
    return inline_intpoly_montgomeryMult_P256();
  case vmIntrinsics::_intpoly_assign:
    return inline_intpoly_assign();
  case vmIntrinsics::_encodeISOArray:
  case vmIntrinsics::_encodeByteISOArray:
    return inline_encodeISOArray(false);
  case vmIntrinsics::_encodeAsciiArray:
    return inline_encodeISOArray(true);

  case vmIntrinsics::_updateCRC32:
    return inline_updateCRC32();
  case vmIntrinsics::_updateBytesCRC32:
    return inline_updateBytesCRC32();
  case vmIntrinsics::_updateByteBufferCRC32:
    return inline_updateByteBufferCRC32();

  case vmIntrinsics::_updateBytesCRC32C:
    return inline_updateBytesCRC32C();
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
    return inline_updateDirectByteBufferCRC32C();

  case vmIntrinsics::_updateBytesAdler32:
    return inline_updateBytesAdler32();
  case vmIntrinsics::_updateByteBufferAdler32:
    return inline_updateByteBufferAdler32();

  case vmIntrinsics::_profileBoolean:
    return inline_profileBoolean();
  case vmIntrinsics::_isCompileConstant:
    return inline_isCompileConstant();

  case vmIntrinsics::_countPositives:
    return inline_countPositives();

  case vmIntrinsics::_fmaD:
  case vmIntrinsics::_fmaF:
    return inline_fma(intrinsic_id());

  case vmIntrinsics::_isDigit:
  case vmIntrinsics::_isLowerCase:
  case vmIntrinsics::_isUpperCase:
  case vmIntrinsics::_isWhitespace:
    return inline_character_compare(intrinsic_id());

  case vmIntrinsics::_min:
  case vmIntrinsics::_max:
  case vmIntrinsics::_min_strict:
  case vmIntrinsics::_max_strict:
  case vmIntrinsics::_minL:
  case vmIntrinsics::_maxL:
  case vmIntrinsics::_minF:
  case vmIntrinsics::_maxF:
  case vmIntrinsics::_minD:
  case vmIntrinsics::_maxD:
  case vmIntrinsics::_minF_strict:
  case vmIntrinsics::_maxF_strict:
  case vmIntrinsics::_minD_strict:
  case vmIntrinsics::_maxD_strict:
    return inline_min_max(intrinsic_id());

  case vmIntrinsics::_VectorUnaryOp:
    return inline_vector_nary_operation(1);
  case vmIntrinsics::_VectorBinaryOp:
    return inline_vector_nary_operation(2);
  case vmIntrinsics::_VectorUnaryLibOp:
    return inline_vector_call(1);
  case vmIntrinsics::_VectorBinaryLibOp:
    return inline_vector_call(2);
  case vmIntrinsics::_VectorTernaryOp:
    return inline_vector_nary_operation(3);
  case vmIntrinsics::_VectorFromBitsCoerced:
    return inline_vector_frombits_coerced();
  case vmIntrinsics::_VectorMaskOp:
    return inline_vector_mask_operation();
  case vmIntrinsics::_VectorLoadOp:
    return inline_vector_mem_operation(/*is_store=*/false);
  case vmIntrinsics::_VectorLoadMaskedOp:
    return inline_vector_mem_masked_operation(/*is_store=*/false);
  case vmIntrinsics::_VectorStoreOp:
    return inline_vector_mem_operation(/*is_store=*/true);
  case vmIntrinsics::_VectorStoreMaskedOp:
    return inline_vector_mem_masked_operation(/*is_store=*/true);
  case vmIntrinsics::_VectorGatherOp:
    return inline_vector_gather_scatter(/*is_scatter=*/false);
  case vmIntrinsics::_VectorScatterOp:
    return inline_vector_gather_scatter(/*is_scatter=*/true);
  case vmIntrinsics::_VectorReductionCoerced:
    return inline_vector_reduction();
  case vmIntrinsics::_VectorTest:
    return inline_vector_test();
  case vmIntrinsics::_VectorBlend:
    return inline_vector_blend();
  case vmIntrinsics::_VectorRearrange:
    return inline_vector_rearrange();
  case vmIntrinsics::_VectorSelectFrom:
    return inline_vector_select_from();
  case vmIntrinsics::_VectorCompare:
    return inline_vector_compare();
  case vmIntrinsics::_VectorBroadcastInt:
    return inline_vector_broadcast_int();
  case vmIntrinsics::_VectorConvert:
    return inline_vector_convert();
  case vmIntrinsics::_VectorInsert:
    return inline_vector_insert();
  case vmIntrinsics::_VectorExtract:
    return inline_vector_extract();
  case vmIntrinsics::_VectorCompressExpand:
    return inline_vector_compress_expand();
  case vmIntrinsics::_VectorSelectFromTwoVectorOp:
    return inline_vector_select_from_two_vectors();
  case vmIntrinsics::_IndexVector:
    return inline_index_vector();
  case vmIntrinsics::_IndexPartiallyInUpperRange:
    return inline_index_partially_in_upper_range();

  case vmIntrinsics::_getObjectSize:
    return inline_getObjectSize();

  case vmIntrinsics::_blackhole:
    return inline_blackhole();

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmIntrinsics.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), vmIntrinsics::as_int(intrinsic_id()));
    }
#endif
    return false;
  }
}

Node* LibraryCallKit::try_to_predicate(int predicate) {
  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node.
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  switch (intrinsic_id()) {
  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(false);
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(true);
  case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
    return inline_electronicCodeBook_AESCrypt_predicate(false);
  case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
    return inline_electronicCodeBook_AESCrypt_predicate(true);
  case vmIntrinsics::_counterMode_AESCrypt:
    return inline_counterMode_AESCrypt_predicate();
  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB_predicate(predicate);
  case vmIntrinsics::_galoisCounterMode_AESCrypt:
    return inline_galoisCounterMode_AESCrypt_predicate();

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmIntrinsics.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented predicate for intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), vmIntrinsics::as_int(intrinsic_id()));
    }
#endif
    Node* slow_ctl = control();
    set_control(top()); // No fast path intrinsic
    return slow_ctl;
  }
}

//------------------------------set_result-------------------------------
// Helper function for finishing intrinsics.
void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) {
  record_for_igvn(region);
  set_control(_gvn.transform(region));
  set_result( _gvn.transform(value));
  assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity");
}

//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path. If the test fails
// then a fast path can be taken. (We generally hope it fails.)
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or null if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not null, the slow edge
// is appended to the region.
Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
  if (stopped()) {
    // Already short circuited.
    return nullptr;
  }

  // Build an if node and its projections.
  // If test is true we take the slow path, which we assume is uncommon.
  if (_gvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken. No need to build this guard.
    return nullptr;
  }

  IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);

  Node* if_slow = _gvn.transform(new IfTrueNode(iff));
  if (if_slow == top()) {
    // The slow branch is never taken. No need to build this guard.
    return nullptr;
  }

  if (region != nullptr)
    region->add_req(if_slow);

  Node* if_fast = _gvn.transform(new IfFalseNode(iff));
  set_control(if_fast);

  return if_slow;
}

inline Node* LibraryCallKit::generate_slow_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_UNLIKELY_MAG(3));
}
inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_FAIR);
}
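
// Usage sketch (illustrative only; not invoked by the build): guard an
// uncommon condition and merge its slow path into a region:
//
//   Node* cmp  = _gvn.transform(new CmpINode(a, b));
//   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
//   Node* slow = generate_slow_guard(bol, region); // appends slow edge to region
//   // control() now continues along the fast path; if 'slow' is non-null,
//   // it is the control input under which slow-path IR may be emitted.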

inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
                                                     Node** pos_index, bool with_opaque) {
  if (stopped())
    return nullptr; // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
    return nullptr; // index is already adequately typed
  Node* cmp_lt = _gvn.transform(new CmpINode(index, intcon(0)));
  Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
  if (with_opaque) {
    bol_lt = _gvn.transform(new OpaqueConstantBoolNode(C, bol_lt, false));
  }
  Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
  if (is_neg != nullptr && pos_index != nullptr) {
    // Emulate effect of Parse::adjust_map_after_if.
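    // On this (fast) path the test 'index < 0' is known to be false, so the
    // type of 'index' may be strengthened to [0,maxint]. The CastII records
    // that fact and is pinned on the current control so it cannot float
    // above the guard.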
    Node* ccast = new CastIINode(control(), index, TypeInt::POS);
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_neg;
}

// Make sure that 'position' is a valid limit index, in [0..length].
// There are two equivalent plans for checking this:
//   A. (offset + copyLength)  unsigned<=  arrayLength
//   B. offset  <=  (arrayLength - copyLength)
// We require that all of the values above, except for the sum and
// difference, are already known to be non-negative.
// Plan A is robust in the face of overflow, if offset and copyLength
// are both hugely positive.
//
// Plan B is less direct and intuitive, but it does not overflow at
// all, since the difference of two non-negatives is always
// representable. Whenever Java methods must perform the equivalent
// check they generally use Plan B instead of Plan A.
// For the moment we use Plan A.
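//
// Worked example (illustrative): with 32-bit ints, offset = 0x70000000
// and copyLength = 0x70000000 make offset + copyLength wrap to the
// negative value 0xE0000000. Under Plan A's unsigned comparison the
// wrapped sum still compares greater than any legal arrayLength
// (at most 0x7FFFFFFF), so the guard correctly fires.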
inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
                                                  Node* subseq_length,
                                                  Node* array_length,
                                                  RegionNode* region,
                                                  bool with_opaque) {
  if (stopped())
    return nullptr; // already stopped
  bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && subseq_length->eqv_uncast(array_length))
    return nullptr; // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset) // last += offset
    last = _gvn.transform(new AddINode(last, offset));
  Node* cmp_lt = _gvn.transform(new CmpUNode(array_length, last));
  Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
  if (with_opaque) {
    bol_lt = _gvn.transform(new OpaqueConstantBoolNode(C, bol_lt, false));
  }
  Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
  return is_over;
}

// Emit range checks for the given String.value byte array.
void LibraryCallKit::generate_string_range_check(Node* array,
                                                 Node* offset,
                                                 Node* count,
                                                 bool char_count,
                                                 bool halt_on_oob) {
  if (stopped()) {
    return; // already stopped
  }
  RegionNode* bailout = new RegionNode(1);
  record_for_igvn(bailout);
  if (char_count) {
    // Convert char count to byte count.
    count = _gvn.transform(new LShiftINode(count, intcon(1)));
  }

  // Offset and count must not be negative.
  generate_negative_guard(offset, bailout, nullptr, halt_on_oob);
  generate_negative_guard(count, bailout, nullptr, halt_on_oob);
  // Offset + count must not exceed the length of the array.
  generate_limit_guard(offset, count, load_array_length(array), bailout, halt_on_oob);

  if (bailout->req() > 1) {
    if (halt_on_oob) {
      bailout = _gvn.transform(bailout)->as_Region();
      Node* frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr));
      Node* halt = _gvn.transform(new HaltNode(bailout, frame, "unexpected guard failure in intrinsic"));
      C->root()->add_req(halt);
    } else {
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(bailout));
      uncommon_trap(Deoptimization::Reason_intrinsic,
                    Deoptimization::Action_maybe_recompile);
    }
  }
}

Node* LibraryCallKit::current_thread_helper(Node*& tls_output, ByteSize handle_offset,
                                            bool is_immutable) {
  ciKlass* thread_klass = env()->Thread_klass();
  const Type* thread_type
    = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);

  Node* thread = _gvn.transform(new ThreadLocalNode());
  Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(handle_offset));
  tls_output = thread;

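  // Two dependent loads follow: first the handle (a raw address stored in
  // the JavaThread at handle_offset), then, via access_load() below, the
  // thread oop that the handle refers to.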
  Node* thread_obj_handle
    = (is_immutable
       ? LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
                        TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered)
       : make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered));
  thread_obj_handle = _gvn.transform(thread_obj_handle);

  DecoratorSet decorators = IN_NATIVE;
  if (is_immutable) {
    decorators |= C2_IMMUTABLE_MEMORY;
  }
  return access_load(thread_obj_handle, thread_type, T_OBJECT, decorators);
}

//--------------------------generate_current_thread--------------------
Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
  return current_thread_helper(tls_output, JavaThread::threadObj_offset(),
                               /*is_immutable*/false);
}

//--------------------------generate_virtual_thread--------------------
Node* LibraryCallKit::generate_virtual_thread(Node* tls_output) {
  return current_thread_helper(tls_output, JavaThread::vthread_offset(),
                               !C->method()->changes_current_thread());
}
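
// Note: generate_virtual_thread() treats the vthread handle load as
// immutable only when the compiled method cannot switch the current
// thread (C->method()->changes_current_thread() is false); the
// C2_IMMUTABLE_MEMORY decorator then allows the load to be commoned
// and hoisted, e.g. out of loops.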

//------------------------------make_string_method_node------------------------
// Helper method for String intrinsic functions. This version is called with
// str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
// characters (depending on the argument encoding 'ae'). cnt1 and cnt2 point
// to Int nodes containing the lengths of str1 and str2.
Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
  Node* result = nullptr;
  switch (opcode) {
  case Op_StrIndexOf:
    result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES),
                                str1_start, cnt1, str2_start, cnt2, ae);
    break;
  case Op_StrComp:
    result = new StrCompNode(control(), memory(TypeAryPtr::BYTES),
                             str1_start, cnt1, str2_start, cnt2, ae);
    break;
  case Op_StrEquals:
    // We already know that cnt1 == cnt2 here (checked in 'inline_string_equals').
    // Use the constant length if there is one, because an optimized match rule
    // may exist for that case.
    result = new StrEqualsNode(control(), memory(TypeAryPtr::BYTES),
                               str1_start, str2_start, cnt2->is_Con() ? cnt2 : cnt1, ae);
    break;
  default:
    ShouldNotReachHere();
    return nullptr;
  }

  // All these intrinsics have checks.
  C->set_has_split_ifs(true); // Has chance for split-if optimization
  clear_upper_avx();

  return _gvn.transform(result);
}

//------------------------------inline_string_compareTo------------------------
bool LibraryCallKit::inline_string_compareTo(StrIntrinsicNode::ArgEnc ae) {
  Node* arg1 = argument(0);
  Node* arg2 = argument(1);

  arg1 = must_be_not_null(arg1, true);
  arg2 = must_be_not_null(arg2, true);

  // Get start addr and length of first argument
  Node* arg1_start = array_element_address(arg1, intcon(0), T_BYTE);
  Node* arg1_cnt = load_array_length(arg1);

  // Get start addr and length of second argument
  Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
  Node* arg2_cnt = load_array_length(arg2);

  Node* result = make_string_method_node(Op_StrComp, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
  set_result(result);
  return true;
}

//------------------------------inline_string_equals------------------------
bool LibraryCallKit::inline_string_equals(StrIntrinsicNode::ArgEnc ae) {
  Node* arg1 = argument(0);
  Node* arg2 = argument(1);

  // paths (plus control) merge
  RegionNode* region = new RegionNode(3);
  Node* phi = new PhiNode(region, TypeInt::BOOL);
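  // The region/phi pair merges two results: intcon(0) on the path where
  // the array lengths differ (slow guard taken below), and the StrEquals
  // node's boolean result on the fall-through path.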
1085
1086 if (!stopped()) {
1087
1088 arg1 = must_be_not_null(arg1, true);
1089 arg2 = must_be_not_null(arg2, true);
1090
1091 // Get start addr and length of first argument
1092 Node* arg1_start = array_element_address(arg1, intcon(0), T_BYTE);
1093 Node* arg1_cnt = load_array_length(arg1);
1094
1095 // Get start addr and length of second argument
1096 Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
1097 Node* arg2_cnt = load_array_length(arg2);
1098
1099 // Check for arg1_cnt != arg2_cnt
1100 Node* cmp = _gvn.transform(new CmpINode(arg1_cnt, arg2_cnt));
1101 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
1102 Node* if_ne = generate_slow_guard(bol, nullptr);
1103 if (if_ne != nullptr) {
1104 phi->init_req(2, intcon(0));
1105 region->init_req(2, if_ne);
1106 }
1107
1108 // Check for count == 0 is done by assembler code for StrEquals.
1109
1110 if (!stopped()) {
1111 Node* equals = make_string_method_node(Op_StrEquals, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
1112 phi->init_req(1, equals);
1113 region->init_req(1, control());
1114 }
1115 }
1116
1117 // post merge
1118 set_control(_gvn.transform(region));
1119 record_for_igvn(region);
1120
1121 set_result(_gvn.transform(phi));
1122 return true;
1123 }
1124
1125 //------------------------------inline_array_equals----------------------------
1126 bool LibraryCallKit::inline_array_equals(StrIntrinsicNode::ArgEnc ae) {
1127 assert(ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::LL, "unsupported array types");
1128 Node* arg1 = argument(0);
1129 Node* arg2 = argument(1);
1130
1131 const TypeAryPtr* mtype = (ae == StrIntrinsicNode::UU) ? TypeAryPtr::CHARS : TypeAryPtr::BYTES;
1132 set_result(_gvn.transform(new AryEqNode(control(), memory(mtype), arg1, arg2, ae)));
1133 clear_upper_avx();
1134
1135 return true;
1136 }
1137
1138
1139 //------------------------------inline_countPositives------------------------------
1140 // int java.lang.StringCoding#countPositives0(byte[] ba, int off, int len)
1141 bool LibraryCallKit::inline_countPositives() {
1142 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1143 return false;
1144 }
1145
1146 assert(callee()->signature()->size() == 3, "countPositives has 3 parameters");
1147 // no receiver since it is static method
1148 Node* ba = argument(0);
1149 Node* offset = argument(1);
1150 Node* len = argument(2);
1151
1152 ba = must_be_not_null(ba, true);
1153 generate_string_range_check(ba, offset, len, false, true);
1154 if (stopped()) {
1155 return true;
1156 }
1157
1158 Node* ba_start = array_element_address(ba, offset, T_BYTE);
1159 Node* result = new CountPositivesNode(control(), memory(TypeAryPtr::BYTES), ba_start, len);
1160 set_result(_gvn.transform(result));
1161 clear_upper_avx();
1162 return true;
1163 }
1164
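//-----------------------inline_preconditions_checkIndex-----------------------
// Intrinsic for jdk.internal.util.Preconditions.checkIndex; bt selects the
// int (T_INT) or long (T_LONG) flavor. It checks 0 <= index < length,
// trapping on failure, and returns the index with its type narrowed to
// [0, length).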
1165 bool LibraryCallKit::inline_preconditions_checkIndex(BasicType bt) {
1166 Node* index = argument(0);
1167 Node* length = bt == T_INT ? argument(1) : argument(2);
1168 if (too_many_traps(Deoptimization::Reason_intrinsic) || too_many_traps(Deoptimization::Reason_range_check)) {
1169 return false;
1170 }
1171
  // Check that length is non-negative.
1173 Node* len_pos_cmp = _gvn.transform(CmpNode::make(length, integercon(0, bt), bt));
1174 Node* len_pos_bol = _gvn.transform(new BoolNode(len_pos_cmp, BoolTest::ge));
1175
1176 {
1177 BuildCutout unless(this, len_pos_bol, PROB_MAX);
1178 uncommon_trap(Deoptimization::Reason_intrinsic,
1179 Deoptimization::Action_make_not_entrant);
1180 }
1181
1182 if (stopped()) {
    // Length is known to always be negative during compilation; the IR graph
    // constructed so far is valid, so return success.
1184 return true;
1185 }
1186
  // Length is now known to be non-negative; add a cast node to make this explicit.
1188 jlong upper_bound = _gvn.type(length)->is_integer(bt)->hi_as_long();
1189 Node* casted_length = ConstraintCastNode::make_cast_for_basic_type(
1190 control(), length, TypeInteger::make(0, upper_bound, Type::WidenMax, bt),
1191 ConstraintCastNode::DependencyType::FloatingNarrowing, bt);
1192 casted_length = _gvn.transform(casted_length);
1193 replace_in_map(length, casted_length);
1194 length = casted_length;
1195
1196 // Use an unsigned comparison for the range check itself
1197 Node* rc_cmp = _gvn.transform(CmpNode::make(index, length, bt, true));
1198 BoolTest::mask btest = BoolTest::lt;
1199 Node* rc_bool = _gvn.transform(new BoolNode(rc_cmp, btest));
1200 RangeCheckNode* rc = new RangeCheckNode(control(), rc_bool, PROB_MAX, COUNT_UNKNOWN);
1201 _gvn.set_type(rc, rc->Value(&_gvn));
1202 if (!rc_bool->is_Con()) {
1203 record_for_igvn(rc);
1204 }
1205 set_control(_gvn.transform(new IfTrueNode(rc)));
1206 {
1207 PreserveJVMState pjvms(this);
1208 set_control(_gvn.transform(new IfFalseNode(rc)));
1209 uncommon_trap(Deoptimization::Reason_range_check,
1210 Deoptimization::Action_make_not_entrant);
1211 }
1212
1213 if (stopped()) {
    // The range check is known to always fail during compilation; the IR graph
    // constructed so far is valid, so return success.
1215 return true;
1216 }
1217
1218 // index is now known to be >= 0 and < length, cast it
1219 Node* result = ConstraintCastNode::make_cast_for_basic_type(
1220 control(), index, TypeInteger::make(0, upper_bound, Type::WidenMax, bt),
1221 ConstraintCastNode::DependencyType::FloatingNarrowing, bt);
1222 result = _gvn.transform(result);
1223 set_result(result);
1224 replace_in_map(index, result);
1225 return true;
1226 }
1227
1228 //------------------------------inline_string_indexOf------------------------
1229 bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) {
1230 if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
1231 return false;
1232 }
1233 Node* src = argument(0);
1234 Node* tgt = argument(1);
1235
1236 // Make the merge point
1237 RegionNode* result_rgn = new RegionNode(4);
1238 Node* result_phi = new PhiNode(result_rgn, TypeInt::INT);
1239
1240 src = must_be_not_null(src, true);
1241 tgt = must_be_not_null(tgt, true);
1242
1243 // Get start addr and length of source string
1244 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
1245 Node* src_count = load_array_length(src);
1246
1247 // Get start addr and length of substring
1248 Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
1249 Node* tgt_count = load_array_length(tgt);
1250
1251 Node* result = nullptr;
1252 bool call_opt_stub = (StubRoutines::_string_indexof_array[ae] != nullptr);
1253
1254 if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
1255 // Divide src size by 2 if String is UTF16 encoded
1256 src_count = _gvn.transform(new RShiftINode(src_count, intcon(1)));
1257 }
1258 if (ae == StrIntrinsicNode::UU) {
1259 // Divide substring size by 2 if String is UTF16 encoded
1260 tgt_count = _gvn.transform(new RShiftINode(tgt_count, intcon(1)));
1261 }
1262
1263 if (call_opt_stub) {
1264 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::string_IndexOf_Type(),
1265 StubRoutines::_string_indexof_array[ae],
1266 "stringIndexOf", TypePtr::BOTTOM, src_start,
1267 src_count, tgt_start, tgt_count);
1268 result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1269 } else {
1270 result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count,
1271 result_rgn, result_phi, ae);
1272 }
1273 if (result != nullptr) {
1274 result_phi->init_req(3, result);
1275 result_rgn->init_req(3, control());
1276 }
1277 set_control(_gvn.transform(result_rgn));
1278 record_for_igvn(result_rgn);
1279 set_result(_gvn.transform(result_phi));
1280
1281 return true;
1282 }
1283
1284 //-----------------------------inline_string_indexOfI-----------------------
1285 bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) {
1286 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1287 return false;
1288 }
1289 if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
1290 return false;
1291 }
1292
1293 assert(callee()->signature()->size() == 5, "String.indexOf() has 5 arguments");
1294 Node* src = argument(0); // byte[]
1295 Node* src_count = argument(1); // char count
1296 Node* tgt = argument(2); // byte[]
1297 Node* tgt_count = argument(3); // char count
1298 Node* from_index = argument(4); // char index
1299
1300 src = must_be_not_null(src, true);
1301 tgt = must_be_not_null(tgt, true);
1302
1303 // Multiply byte array index by 2 if String is UTF16 encoded
1304 Node* src_offset = (ae == StrIntrinsicNode::LL) ? from_index : _gvn.transform(new LShiftINode(from_index, intcon(1)));
1305 src_count = _gvn.transform(new SubINode(src_count, from_index));
1306 Node* src_start = array_element_address(src, src_offset, T_BYTE);
1307 Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
1308
1309 // Range checks
1310 generate_string_range_check(src, src_offset, src_count, ae != StrIntrinsicNode::LL, true);
1311 generate_string_range_check(tgt, intcon(0), tgt_count, ae == StrIntrinsicNode::UU, true);
1312 if (stopped()) {
1313 return true;
1314 }
1315
1316 RegionNode* region = new RegionNode(5);
1317 Node* phi = new PhiNode(region, TypeInt::INT);
1318 Node* result = nullptr;
1319
1320 bool call_opt_stub = (StubRoutines::_string_indexof_array[ae] != nullptr);
1321
1322 if (call_opt_stub) {
1323 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::string_IndexOf_Type(),
1324 StubRoutines::_string_indexof_array[ae],
1325 "stringIndexOf", TypePtr::BOTTOM, src_start,
1326 src_count, tgt_start, tgt_count);
1327 result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1328 } else {
1329 result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count,
1330 region, phi, ae);
1331 }
1332 if (result != nullptr) {
1333 // The result is index relative to from_index if substring was found, -1 otherwise.
1334 // Generate code which will fold into cmove.
1335 Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
1336 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
1337
1338 Node* if_lt = generate_slow_guard(bol, nullptr);
1339 if (if_lt != nullptr) {
1340 // result == -1
1341 phi->init_req(3, result);
1342 region->init_req(3, if_lt);
1343 }
1344 if (!stopped()) {
1345 result = _gvn.transform(new AddINode(result, from_index));
1346 phi->init_req(4, result);
1347 region->init_req(4, control());
1348 }
1349 }
1350
1351 set_control(_gvn.transform(region));
1352 record_for_igvn(region);
1353 set_result(_gvn.transform(phi));
1354 clear_upper_avx();
1355
1356 return true;
1357 }
1358
1359 // Create StrIndexOfNode with fast path checks
1360 Node* LibraryCallKit::make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count,
1361 RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae) {
1362 // Check for substr count > string count
1363 Node* cmp = _gvn.transform(new CmpINode(tgt_count, src_count));
1364 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::gt));
1365 Node* if_gt = generate_slow_guard(bol, nullptr);
1366 if (if_gt != nullptr) {
1367 phi->init_req(1, intcon(-1));
1368 region->init_req(1, if_gt);
1369 }
1370 if (!stopped()) {
1371 // Check for substr count == 0
1372 cmp = _gvn.transform(new CmpINode(tgt_count, intcon(0)));
1373 bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
1374 Node* if_zero = generate_slow_guard(bol, nullptr);
1375 if (if_zero != nullptr) {
1376 phi->init_req(2, intcon(0));
1377 region->init_req(2, if_zero);
1378 }
1379 }
1380 if (!stopped()) {
1381 return make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
1382 }
1383 return nullptr;
1384 }
1385
1386 //-----------------------------inline_string_indexOfChar-----------------------
1387 bool LibraryCallKit::inline_string_indexOfChar(StrIntrinsicNode::ArgEnc ae) {
1388 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1389 return false;
1390 }
1391 if (!Matcher::match_rule_supported(Op_StrIndexOfChar)) {
1392 return false;
1393 }
1394 assert(callee()->signature()->size() == 4, "String.indexOfChar() has 4 arguments");
1395 Node* src = argument(0); // byte[]
1396 Node* int_ch = argument(1);
1397 Node* from_index = argument(2);
1398 Node* max = argument(3);
1399
1400 src = must_be_not_null(src, true);
1401
1402 Node* src_offset = ae == StrIntrinsicNode::L ? from_index : _gvn.transform(new LShiftINode(from_index, intcon(1)));
1403 Node* src_start = array_element_address(src, src_offset, T_BYTE);
1404 Node* src_count = _gvn.transform(new SubINode(max, from_index));
1405
1406 // Range checks
1407 generate_string_range_check(src, src_offset, src_count, ae == StrIntrinsicNode::U, true);
1408
1409 // Check for int_ch >= 0
1410 Node* int_ch_cmp = _gvn.transform(new CmpINode(int_ch, intcon(0)));
1411 Node* int_ch_bol = _gvn.transform(new BoolNode(int_ch_cmp, BoolTest::ge));
1412 {
1413 BuildCutout unless(this, int_ch_bol, PROB_MAX);
1414 uncommon_trap(Deoptimization::Reason_intrinsic,
1415 Deoptimization::Action_maybe_recompile);
1416 }
1417 if (stopped()) {
1418 return true;
1419 }
1420
1421 RegionNode* region = new RegionNode(3);
1422 Node* phi = new PhiNode(region, TypeInt::INT);
1423
1424 Node* result = new StrIndexOfCharNode(control(), memory(TypeAryPtr::BYTES), src_start, src_count, int_ch, ae);
1425 C->set_has_split_ifs(true); // Has chance for split-if optimization
1426 _gvn.transform(result);
1427
1428 Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
1429 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
1430
1431 Node* if_lt = generate_slow_guard(bol, nullptr);
1432 if (if_lt != nullptr) {
1433 // result == -1
1434 phi->init_req(2, result);
1435 region->init_req(2, if_lt);
1436 }
1437 if (!stopped()) {
1438 result = _gvn.transform(new AddINode(result, from_index));
1439 phi->init_req(1, result);
1440 region->init_req(1, control());
1441 }
1442 set_control(_gvn.transform(region));
1443 record_for_igvn(region);
1444 set_result(_gvn.transform(phi));
1445 clear_upper_avx();
1446
1447 return true;
1448 }
1449 //---------------------------inline_string_copy---------------------
// compress == true --> generate a compressed copy operation (compress char[]/byte[] to byte[])
1451 // int StringUTF16.compress0(char[] src, int srcOff, byte[] dst, int dstOff, int len)
1452 // int StringUTF16.compress0(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
// compress == false --> generate an inflated copy operation (inflate byte[] to char[]/byte[])
1454 // void StringLatin1.inflate0(byte[] src, int srcOff, char[] dst, int dstOff, int len)
1455 // void StringLatin1.inflate0(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
1456 bool LibraryCallKit::inline_string_copy(bool compress) {
1457 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1458 return false;
1459 }
1460 int nargs = 5; // 2 oops, 3 ints
1461 assert(callee()->signature()->size() == nargs, "string copy has 5 arguments");
1462
1463 Node* src = argument(0);
1464 Node* src_offset = argument(1);
1465 Node* dst = argument(2);
1466 Node* dst_offset = argument(3);
1467 Node* length = argument(4);
1468
1469 // Check for allocation before we add nodes that would confuse
1470 // tightly_coupled_allocation()
1471 AllocateArrayNode* alloc = tightly_coupled_allocation(dst);
1472
1473 // Figure out the size and type of the elements we will be copying.
1474 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
1475 const TypeAryPtr* dst_type = dst->Value(&_gvn)->isa_aryptr();
1476 if (src_type == nullptr || dst_type == nullptr) {
1477 return false;
1478 }
1479 BasicType src_elem = src_type->elem()->array_element_basic_type();
1480 BasicType dst_elem = dst_type->elem()->array_element_basic_type();
1481 assert((compress && dst_elem == T_BYTE && (src_elem == T_BYTE || src_elem == T_CHAR)) ||
1482 (!compress && src_elem == T_BYTE && (dst_elem == T_BYTE || dst_elem == T_CHAR)),
1483 "Unsupported array types for inline_string_copy");
1484
1485 src = must_be_not_null(src, true);
1486 dst = must_be_not_null(dst, true);
1487
1488 // Convert char[] offsets to byte[] offsets
1489 bool convert_src = (compress && src_elem == T_BYTE);
1490 bool convert_dst = (!compress && dst_elem == T_BYTE);
1491 if (convert_src) {
1492 src_offset = _gvn.transform(new LShiftINode(src_offset, intcon(1)));
1493 } else if (convert_dst) {
1494 dst_offset = _gvn.transform(new LShiftINode(dst_offset, intcon(1)));
1495 }
1496
1497 // Range checks
1498 generate_string_range_check(src, src_offset, length, convert_src, true);
1499 generate_string_range_check(dst, dst_offset, length, convert_dst, true);
1500 if (stopped()) {
1501 return true;
1502 }
1503
1504 Node* src_start = array_element_address(src, src_offset, src_elem);
1505 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
1506 // 'src_start' points to src array + scaled offset
1507 // 'dst_start' points to dst array + scaled offset
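  // For the compress case the intrinsic produces an int result ('count' from
  // compress_string()); the inflate case produces no value.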
1508 Node* count = nullptr;
1509 if (compress) {
1510 count = compress_string(src_start, TypeAryPtr::get_array_body_type(src_elem), dst_start, length);
1511 } else {
1512 inflate_string(src_start, dst_start, TypeAryPtr::get_array_body_type(dst_elem), length);
1513 }
1514
1515 if (alloc != nullptr) {
1516 if (alloc->maybe_set_complete(&_gvn)) {
1517 // "You break it, you buy it."
1518 InitializeNode* init = alloc->initialization();
1519 assert(init->is_complete(), "we just did this");
1520 init->set_complete_with_arraycopy();
1521 assert(dst->is_CheckCastPP(), "sanity");
1522 assert(dst->in(0)->in(0) == init, "dest pinned");
1523 }
1524 // Do not let stores that initialize this object be reordered with
1525 // a subsequent store that would make this object accessible by
1526 // other threads.
1527 // Record what AllocateNode this StoreStore protects so that
1528 // escape analysis can go from the MemBarStoreStoreNode to the
1529 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1530 // based on the escape status of the AllocateNode.
1531 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1532 }
1533 if (compress) {
1534 set_result(_gvn.transform(count));
1535 }
1536 clear_upper_avx();
1537
1538 return true;
1539 }
1540
1541 #ifdef _LP64
1542 #define XTOP ,top() /*additional argument*/
1543 #else //_LP64
1544 #define XTOP /*no additional argument*/
1545 #endif //_LP64
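// XTOP supplies the second (high) half of a long argument in a runtime call
// signature: on LP64 a long occupies two parameter slots and the second slot
// is filled with top(); on 32-bit ConvI2X yields an int and no extra slot is
// needed.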
1546
1547 //------------------------inline_string_toBytesU--------------------------
1548 // public static byte[] StringUTF16.toBytes(char[] value, int off, int len)
1549 bool LibraryCallKit::inline_string_toBytesU() {
1550 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1551 return false;
1552 }
1553 // Get the arguments.
1554 Node* value = argument(0);
1555 Node* offset = argument(1);
1556 Node* length = argument(2);
1557
1558 Node* newcopy = nullptr;
1559
1560 // Set the original stack and the reexecute bit for the interpreter to reexecute
1561 // the bytecode that invokes StringUTF16.toBytes() if deoptimization happens.
1562 { PreserveReexecuteState preexecs(this);
1563 jvms()->set_should_reexecute(true);
1564
1565 // Check if a null path was taken unconditionally.
1566 value = null_check(value);
1567
1568 RegionNode* bailout = new RegionNode(1);
1569 record_for_igvn(bailout);
1570
1571 // Range checks
1572 generate_negative_guard(offset, bailout);
1573 generate_negative_guard(length, bailout);
1574 generate_limit_guard(offset, length, load_array_length(value), bailout);
1575 // Make sure that resulting byte[] length does not overflow Integer.MAX_VALUE
1576 generate_limit_guard(length, intcon(0), intcon(max_jint/2), bailout);
1577
1578 if (bailout->req() > 1) {
1579 PreserveJVMState pjvms(this);
1580 set_control(_gvn.transform(bailout));
1581 uncommon_trap(Deoptimization::Reason_intrinsic,
1582 Deoptimization::Action_maybe_recompile);
1583 }
1584 if (stopped()) {
1585 return true;
1586 }
1587
1588 Node* size = _gvn.transform(new LShiftINode(length, intcon(1)));
1589 Node* klass_node = makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_BYTE)));
1590 newcopy = new_array(klass_node, size, 0); // no arguments to push
1591 AllocateArrayNode* alloc = tightly_coupled_allocation(newcopy);
1592 guarantee(alloc != nullptr, "created above");
1593
1594 // Calculate starting addresses.
1595 Node* src_start = array_element_address(value, offset, T_CHAR);
1596 Node* dst_start = basic_plus_adr(newcopy, arrayOopDesc::base_offset_in_bytes(T_BYTE));
1597
1598 // Check if dst array address is aligned to HeapWordSize
1599 bool aligned = (arrayOopDesc::base_offset_in_bytes(T_BYTE) % HeapWordSize == 0);
1600 // If true, then check if src array address is aligned to HeapWordSize
1601 if (aligned) {
1602 const TypeInt* toffset = gvn().type(offset)->is_int();
1603 aligned = toffset->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_CHAR) +
1604 toffset->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
1605 }
1606
1607 // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
1608 const char* copyfunc_name = "arraycopy";
1609 address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
1610 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
1611 OptoRuntime::fast_arraycopy_Type(),
1612 copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
1613 src_start, dst_start, ConvI2X(length) XTOP);
1614 // Do not let reads from the cloned object float above the arraycopy.
1615 if (alloc->maybe_set_complete(&_gvn)) {
1616 // "You break it, you buy it."
1617 InitializeNode* init = alloc->initialization();
1618 assert(init->is_complete(), "we just did this");
1619 init->set_complete_with_arraycopy();
1620 assert(newcopy->is_CheckCastPP(), "sanity");
1621 assert(newcopy->in(0)->in(0) == init, "dest pinned");
1622 }
1623 // Do not let stores that initialize this object be reordered with
1624 // a subsequent store that would make this object accessible by
1625 // other threads.
1626 // Record what AllocateNode this StoreStore protects so that
1627 // escape analysis can go from the MemBarStoreStoreNode to the
1628 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1629 // based on the escape status of the AllocateNode.
1630 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1631 } // original reexecute is set back here
1632
1633 C->set_has_split_ifs(true); // Has chance for split-if optimization
1634 if (!stopped()) {
1635 set_result(newcopy);
1636 }
1637 clear_upper_avx();
1638
1639 return true;
1640 }
1641
1642 //------------------------inline_string_getCharsU--------------------------
1643 // public void StringUTF16.getChars(byte[] src, int srcBegin, int srcEnd, char dst[], int dstBegin)
1644 bool LibraryCallKit::inline_string_getCharsU() {
1645 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1646 return false;
1647 }
1648
1649 // Get the arguments.
1650 Node* src = argument(0);
1651 Node* src_begin = argument(1);
1652 Node* src_end = argument(2); // exclusive offset (i < src_end)
1653 Node* dst = argument(3);
1654 Node* dst_begin = argument(4);
1655
1656 // Check for allocation before we add nodes that would confuse
1657 // tightly_coupled_allocation()
1658 AllocateArrayNode* alloc = tightly_coupled_allocation(dst);
1659
1660 // Check if a null path was taken unconditionally.
1661 src = null_check(src);
1662 dst = null_check(dst);
1663 if (stopped()) {
1664 return true;
1665 }
1666
1667 // Get length and convert char[] offset to byte[] offset
1668 Node* length = _gvn.transform(new SubINode(src_end, src_begin));
1669 src_begin = _gvn.transform(new LShiftINode(src_begin, intcon(1)));
1670
1671 // Range checks
1672 generate_string_range_check(src, src_begin, length, true);
1673 generate_string_range_check(dst, dst_begin, length, false);
1674 if (stopped()) {
1675 return true;
1676 }
1677
1678 if (!stopped()) {
1679 // Calculate starting addresses.
1680 Node* src_start = array_element_address(src, src_begin, T_BYTE);
1681 Node* dst_start = array_element_address(dst, dst_begin, T_CHAR);
1682
1683 // Check if array addresses are aligned to HeapWordSize
1684 const TypeInt* tsrc = gvn().type(src_begin)->is_int();
1685 const TypeInt* tdst = gvn().type(dst_begin)->is_int();
1686 bool aligned = tsrc->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_BYTE) + tsrc->get_con() * type2aelembytes(T_BYTE)) % HeapWordSize == 0) &&
1687 tdst->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_CHAR) + tdst->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
1688
1689 // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
1690 const char* copyfunc_name = "arraycopy";
1691 address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
1692 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
1693 OptoRuntime::fast_arraycopy_Type(),
1694 copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
1695 src_start, dst_start, ConvI2X(length) XTOP);
1696 // Do not let reads from the cloned object float above the arraycopy.
1697 if (alloc != nullptr) {
1698 if (alloc->maybe_set_complete(&_gvn)) {
1699 // "You break it, you buy it."
1700 InitializeNode* init = alloc->initialization();
1701 assert(init->is_complete(), "we just did this");
1702 init->set_complete_with_arraycopy();
1703 assert(dst->is_CheckCastPP(), "sanity");
1704 assert(dst->in(0)->in(0) == init, "dest pinned");
1705 }
1706 // Do not let stores that initialize this object be reordered with
1707 // a subsequent store that would make this object accessible by
1708 // other threads.
1709 // Record what AllocateNode this StoreStore protects so that
1710 // escape analysis can go from the MemBarStoreStoreNode to the
1711 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1712 // based on the escape status of the AllocateNode.
1713 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1714 } else {
1715 insert_mem_bar(Op_MemBarCPUOrder);
1716 }
1717 }
1718
1719 C->set_has_split_ifs(true); // Has chance for split-if optimization
1720 return true;
1721 }
1722
1723 //----------------------inline_string_char_access----------------------------
1724 // Store/Load char to/from byte[] array.
1725 // static void StringUTF16.putChar(byte[] val, int index, int c)
1726 // static char StringUTF16.getChar(byte[] val, int index)
1727 bool LibraryCallKit::inline_string_char_access(bool is_store) {
1728 Node* value = argument(0);
1729 Node* index = argument(1);
1730 Node* ch = is_store ? argument(2) : nullptr;
1731
1732 // This intrinsic accesses byte[] array as char[] array. Computing the offsets
1733 // correctly requires matched array shapes.
1734 assert (arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE),
1735 "sanity: byte[] and char[] bases agree");
1736 assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2,
1737 "sanity: byte[] and char[] scales agree");
1738
  // Bail out when getChar over constants is requested: constant folding would
  // reject folding a mismatched char access over byte[]. Normal inlining of the
  // getChar Java method would constant fold nicely instead.
1742 if (!is_store && value->is_Con() && index->is_Con()) {
1743 return false;
1744 }
1745
1746 // Save state and restore on bailout
1747 SavedState old_state(this);
1748
1749 value = must_be_not_null(value, true);
1750
1751 Node* adr = array_element_address(value, index, T_CHAR);
1752 if (adr->is_top()) {
1753 return false;
1754 }
1755 old_state.discard();
1756 if (is_store) {
1757 access_store_at(value, adr, TypeAryPtr::BYTES, ch, TypeInt::CHAR, T_CHAR, IN_HEAP | MO_UNORDERED | C2_MISMATCHED);
1758 } else {
1759 ch = access_load_at(value, adr, TypeAryPtr::BYTES, TypeInt::CHAR, T_CHAR, IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD | C2_UNKNOWN_CONTROL_LOAD);
1760 set_result(ch);
1761 }
1762 return true;
1763 }
1764
1765
1766 //------------------------------inline_math-----------------------------------
// public static double Math.abs(double)
// public static double Math.sqrt(double)
// public static double Math.ceil(double)
// public static double Math.floor(double)
// public static double Math.rint(double)
// public static long Math.round(double)
// public static double Math.copySign(double, double)
// public static double Math.signum(double)
1772 bool LibraryCallKit::inline_double_math(vmIntrinsics::ID id) {
1773 Node* arg = argument(0);
1774 Node* n = nullptr;
1775 switch (id) {
1776 case vmIntrinsics::_dabs: n = new AbsDNode( arg); break;
1777 case vmIntrinsics::_dsqrt:
1778 case vmIntrinsics::_dsqrt_strict:
1779 n = new SqrtDNode(C, control(), arg); break;
1780 case vmIntrinsics::_ceil: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_ceil); break;
1781 case vmIntrinsics::_floor: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_floor); break;
1782 case vmIntrinsics::_rint: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_rint); break;
1783 case vmIntrinsics::_roundD: n = new RoundDNode(arg); break;
1784 case vmIntrinsics::_dcopySign: n = CopySignDNode::make(_gvn, arg, argument(2)); break;
1785 case vmIntrinsics::_dsignum: n = SignumDNode::make(_gvn, arg); break;
1786 default: fatal_unexpected_iid(id); break;
1787 }
1788 set_result(_gvn.transform(n));
1789 return true;
1790 }
1791
1792 //------------------------------inline_math-----------------------------------
// public static float Math.abs(float)
// public static int Math.abs(int)
// public static long Math.abs(long)
// public static float Math.copySign(float, float)
// public static float Math.signum(float)
// public static int Math.round(float)
1796 bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
1797 Node* arg = argument(0);
1798 Node* n = nullptr;
1799 switch (id) {
1800 case vmIntrinsics::_fabs: n = new AbsFNode( arg); break;
1801 case vmIntrinsics::_iabs: n = new AbsINode( arg); break;
1802 case vmIntrinsics::_labs: n = new AbsLNode( arg); break;
1803 case vmIntrinsics::_fcopySign: n = new CopySignFNode(arg, argument(1)); break;
1804 case vmIntrinsics::_fsignum: n = SignumFNode::make(_gvn, arg); break;
1805 case vmIntrinsics::_roundF: n = new RoundFNode(arg); break;
1806 default: fatal_unexpected_iid(id); break;
1807 }
1808 set_result(_gvn.transform(n));
1809 return true;
1810 }
1811
1812 //------------------------------runtime_math-----------------------------
1813 bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
1814 assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
1815 "must be (DD)D or (D)D type");
1816
1817 // Inputs
1818 Node* a = argument(0);
1819 Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? argument(2) : nullptr;
1820
1821 const TypePtr* no_memory_effects = nullptr;
1822 Node* trig = make_runtime_call(RC_LEAF | RC_PURE, call_type, funcAddr, funcName,
1823 no_memory_effects,
1824 a, top(), b, b ? top() : nullptr);
1825 Node* value = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+0));
1826 #ifdef ASSERT
1827 Node* value_top = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+1));
1828 assert(value_top == top(), "second value must be top");
1829 #endif
1830
1831 set_result(value);
1832 return true;
1833 }
1834
1835 //------------------------------inline_math_pow-----------------------------
1836 bool LibraryCallKit::inline_math_pow() {
1837 Node* exp = argument(2);
1838 const TypeD* d = _gvn.type(exp)->isa_double_constant();
1839 if (d != nullptr) {
1840 if (d->getd() == 2.0) {
1841 // Special case: pow(x, 2.0) => x * x
1842 Node* base = argument(0);
1843 set_result(_gvn.transform(new MulDNode(base, base)));
1844 return true;
1845 } else if (d->getd() == 0.5 && Matcher::match_rule_supported(Op_SqrtD)) {
1846 // Special case: pow(x, 0.5) => sqrt(x)
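      // Roughly, the emitted shape is (illustrative sketch):
      //   result = (x <= 0.0) ? pow(x, 0.5)  // slow path: runtime/stub call
      //                       : sqrt(x);     // fast path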
1847 Node* base = argument(0);
1848 Node* zero = _gvn.zerocon(T_DOUBLE);
1849
1850 RegionNode* region = new RegionNode(3);
1851 Node* phi = new PhiNode(region, Type::DOUBLE);
1852
1853 Node* cmp = _gvn.transform(new CmpDNode(base, zero));
1854 // According to the API specs, pow(-0.0, 0.5) = 0.0 and sqrt(-0.0) = -0.0.
1855 // So pow(-0.0, 0.5) shouldn't be replaced with sqrt(-0.0).
1856 // -0.0/+0.0 are both excluded since floating-point comparison doesn't distinguish -0.0 from +0.0.
1857 Node* test = _gvn.transform(new BoolNode(cmp, BoolTest::le));
1858
1859 Node* if_pow = generate_slow_guard(test, nullptr);
1860 Node* value_sqrt = _gvn.transform(new SqrtDNode(C, control(), base));
1861 phi->init_req(1, value_sqrt);
1862 region->init_req(1, control());
1863
1864 if (if_pow != nullptr) {
1865 set_control(if_pow);
1866 address target = StubRoutines::dpow() != nullptr ? StubRoutines::dpow() :
1867 CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
1868 const TypePtr* no_memory_effects = nullptr;
1869 Node* trig = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(), target, "POW",
1870 no_memory_effects, base, top(), exp, top());
1871 Node* value_pow = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+0));
1872 #ifdef ASSERT
1873 Node* value_top = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+1));
1874 assert(value_top == top(), "second value must be top");
1875 #endif
1876 phi->init_req(2, value_pow);
1877 region->init_req(2, _gvn.transform(new ProjNode(trig, TypeFunc::Control)));
1878 }
1879
1880 C->set_has_split_ifs(true); // Has chance for split-if optimization
1881 set_control(_gvn.transform(region));
1882 record_for_igvn(region);
1883 set_result(_gvn.transform(phi));
1884
1885 return true;
1886 }
1887 }
1888
1889 return StubRoutines::dpow() != nullptr ?
1890 runtime_math(OptoRuntime::Math_DD_D_Type(), StubRoutines::dpow(), "dpow") :
1891 runtime_math(OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
1892 }
1893
1894 //------------------------------inline_math_native-----------------------------
1895 bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
1896 switch (id) {
1897 case vmIntrinsics::_dsin:
1898 return StubRoutines::dsin() != nullptr ?
1899 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dsin(), "dsin") :
1900 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dsin), "SIN");
1901 case vmIntrinsics::_dcos:
1902 return StubRoutines::dcos() != nullptr ?
1903 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dcos(), "dcos") :
1904 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dcos), "COS");
1905 case vmIntrinsics::_dtan:
1906 return StubRoutines::dtan() != nullptr ?
1907 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dtan(), "dtan") :
1908 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dtan), "TAN");
1909 case vmIntrinsics::_dsinh:
1910 return StubRoutines::dsinh() != nullptr ?
1911 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dsinh(), "dsinh") : false;
1912 case vmIntrinsics::_dtanh:
1913 return StubRoutines::dtanh() != nullptr ?
1914 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dtanh(), "dtanh") : false;
1915 case vmIntrinsics::_dcbrt:
1916 return StubRoutines::dcbrt() != nullptr ?
1917 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dcbrt(), "dcbrt") : false;
1918 case vmIntrinsics::_dexp:
1919 return StubRoutines::dexp() != nullptr ?
1920 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dexp(), "dexp") :
1921 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
1922 case vmIntrinsics::_dlog:
1923 return StubRoutines::dlog() != nullptr ?
1924 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog(), "dlog") :
1925 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog), "LOG");
1926 case vmIntrinsics::_dlog10:
1927 return StubRoutines::dlog10() != nullptr ?
1928 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog10(), "dlog10") :
1929 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), "LOG10");
1930
1931 case vmIntrinsics::_roundD: return Matcher::match_rule_supported(Op_RoundD) ? inline_double_math(id) : false;
1932 case vmIntrinsics::_ceil:
1933 case vmIntrinsics::_floor:
1934 case vmIntrinsics::_rint: return Matcher::match_rule_supported(Op_RoundDoubleMode) ? inline_double_math(id) : false;
1935
1936 case vmIntrinsics::_dsqrt:
1937 case vmIntrinsics::_dsqrt_strict:
1938 return Matcher::match_rule_supported(Op_SqrtD) ? inline_double_math(id) : false;
1939 case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_double_math(id) : false;
1940 case vmIntrinsics::_fabs: return Matcher::match_rule_supported(Op_AbsF) ? inline_math(id) : false;
1941 case vmIntrinsics::_iabs: return Matcher::match_rule_supported(Op_AbsI) ? inline_math(id) : false;
1942 case vmIntrinsics::_labs: return Matcher::match_rule_supported(Op_AbsL) ? inline_math(id) : false;
1943
1944 case vmIntrinsics::_dpow: return inline_math_pow();
1945 case vmIntrinsics::_dcopySign: return inline_double_math(id);
1946 case vmIntrinsics::_fcopySign: return inline_math(id);
1947 case vmIntrinsics::_dsignum: return Matcher::match_rule_supported(Op_SignumD) ? inline_double_math(id) : false;
1948 case vmIntrinsics::_fsignum: return Matcher::match_rule_supported(Op_SignumF) ? inline_math(id) : false;
1949 case vmIntrinsics::_roundF: return Matcher::match_rule_supported(Op_RoundF) ? inline_math(id) : false;
1950
1951 // These intrinsics are not yet correctly implemented
1952 case vmIntrinsics::_datan2:
1953 return false;
1954
1955 default:
1956 fatal_unexpected_iid(id);
1957 return false;
1958 }
1959 }
1960
1961 //----------------------------inline_notify-----------------------------------*
1962 bool LibraryCallKit::inline_notify(vmIntrinsics::ID id) {
1963 const TypeFunc* ftype = OptoRuntime::monitor_notify_Type();
1964 address func;
1965 if (id == vmIntrinsics::_notify) {
1966 func = OptoRuntime::monitor_notify_Java();
1967 } else {
1968 func = OptoRuntime::monitor_notifyAll_Java();
1969 }
1970 Node* call = make_runtime_call(RC_NO_LEAF, ftype, func, nullptr, TypeRawPtr::BOTTOM, argument(0));
1971 make_slow_call_ex(call, env()->Throwable_klass(), false);
1972 return true;
1973 }
1974
1975
1976 //----------------------------inline_min_max-----------------------------------
1977 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
1978 Node* a = nullptr;
1979 Node* b = nullptr;
1980 Node* n = nullptr;
1981 switch (id) {
1982 case vmIntrinsics::_min:
1983 case vmIntrinsics::_max:
1984 case vmIntrinsics::_minF:
1985 case vmIntrinsics::_maxF:
1986 case vmIntrinsics::_minF_strict:
1987 case vmIntrinsics::_maxF_strict:
1988 case vmIntrinsics::_min_strict:
1989 case vmIntrinsics::_max_strict:
    assert(callee()->signature()->size() == 2, "min/max (int/float) has 2 parameters of size 1 each.");
1991 a = argument(0);
1992 b = argument(1);
1993 break;
1994 case vmIntrinsics::_minD:
1995 case vmIntrinsics::_maxD:
1996 case vmIntrinsics::_minD_strict:
1997 case vmIntrinsics::_maxD_strict:
1998 assert(callee()->signature()->size() == 4, "minD/maxD has 2 parameters of size 2 each.");
1999 a = argument(0);
2000 b = argument(2);
2001 break;
2002 case vmIntrinsics::_minL:
2003 case vmIntrinsics::_maxL:
2004 assert(callee()->signature()->size() == 4, "minL/maxL has 2 parameters of size 2 each.");
2005 a = argument(0);
2006 b = argument(2);
2007 break;
2008 default:
2009 fatal_unexpected_iid(id);
2010 break;
2011 }
2012
2013 switch (id) {
2014 case vmIntrinsics::_min:
2015 case vmIntrinsics::_min_strict:
2016 n = new MinINode(a, b);
2017 break;
2018 case vmIntrinsics::_max:
2019 case vmIntrinsics::_max_strict:
2020 n = new MaxINode(a, b);
2021 break;
2022 case vmIntrinsics::_minF:
2023 case vmIntrinsics::_minF_strict:
2024 n = new MinFNode(a, b);
2025 break;
2026 case vmIntrinsics::_maxF:
2027 case vmIntrinsics::_maxF_strict:
2028 n = new MaxFNode(a, b);
2029 break;
2030 case vmIntrinsics::_minD:
2031 case vmIntrinsics::_minD_strict:
2032 n = new MinDNode(a, b);
2033 break;
2034 case vmIntrinsics::_maxD:
2035 case vmIntrinsics::_maxD_strict:
2036 n = new MaxDNode(a, b);
2037 break;
2038 case vmIntrinsics::_minL:
2039 n = new MinLNode(_gvn.C, a, b);
2040 break;
2041 case vmIntrinsics::_maxL:
2042 n = new MaxLNode(_gvn.C, a, b);
2043 break;
2044 default:
2045 fatal_unexpected_iid(id);
2046 break;
2047 }
2048
2049 set_result(_gvn.transform(n));
2050 return true;
2051 }
2052
2053 bool LibraryCallKit::inline_math_mathExact(Node* math, Node* test) {
2054 if (builtin_throw_too_many_traps(Deoptimization::Reason_intrinsic,
2055 env()->ArithmeticException_instance())) {
    // We have already deoptimized here too many times, but we cannot use
    // builtin_throw (e.g. because we care about backtraces), so bail out of the
    // intrinsic rather than risk deopting again.
2058 return false;
2059 }
2060
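  // Branch on the overflow flag: the fast path returns the plain math result;
  // the slow path re-executes the bytecode and throws ArithmeticException.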
2061 Node* bol = _gvn.transform( new BoolNode(test, BoolTest::overflow) );
2062 IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
2063 Node* fast_path = _gvn.transform( new IfFalseNode(check));
2064 Node* slow_path = _gvn.transform( new IfTrueNode(check) );
2065
2066 {
2067 PreserveJVMState pjvms(this);
2068 PreserveReexecuteState preexecs(this);
2069 jvms()->set_should_reexecute(true);
2070
2071 set_control(slow_path);
2072 set_i_o(i_o());
2073
2074 builtin_throw(Deoptimization::Reason_intrinsic,
2075 env()->ArithmeticException_instance(),
2076 /*allow_too_many_traps*/ false);
2077 }
2078
2079 set_control(fast_path);
2080 set_result(math);
2081 return true;
2082 }
2083
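// Emit the MathOp/OverflowOp node pair for an xxxExact intrinsic: 'operation'
// computes the arithmetic result and 'ofcheck' computes the overflow condition
// consumed by inline_math_mathExact().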
2084 template <typename OverflowOp>
2085 bool LibraryCallKit::inline_math_overflow(Node* arg1, Node* arg2) {
2086 typedef typename OverflowOp::MathOp MathOp;
2087
2088 MathOp* mathOp = new MathOp(arg1, arg2);
2089 Node* operation = _gvn.transform( mathOp );
2090 Node* ofcheck = _gvn.transform( new OverflowOp(arg1, arg2) );
2091 return inline_math_mathExact(operation, ofcheck);
2092 }
2093
2094 bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
2095 return inline_math_overflow<OverflowAddINode>(argument(0), is_increment ? intcon(1) : argument(1));
2096 }
2097
2098 bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
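  // A long occupies two JVM argument slots, so the second long argument is at
  // slot index 2 (the same holds for the other long-typed intrinsics below).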
2099 return inline_math_overflow<OverflowAddLNode>(argument(0), is_increment ? longcon(1) : argument(2));
2100 }
2101
2102 bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
2103 return inline_math_overflow<OverflowSubINode>(argument(0), is_decrement ? intcon(1) : argument(1));
2104 }
2105
2106 bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
2107 return inline_math_overflow<OverflowSubLNode>(argument(0), is_decrement ? longcon(1) : argument(2));
2108 }
2109
2110 bool LibraryCallKit::inline_math_negateExactI() {
2111 return inline_math_overflow<OverflowSubINode>(intcon(0), argument(0));
2112 }
2113
2114 bool LibraryCallKit::inline_math_negateExactL() {
2115 return inline_math_overflow<OverflowSubLNode>(longcon(0), argument(0));
2116 }
2117
2118 bool LibraryCallKit::inline_math_multiplyExactI() {
2119 return inline_math_overflow<OverflowMulINode>(argument(0), argument(1));
2120 }
2121
2122 bool LibraryCallKit::inline_math_multiplyExactL() {
2123 return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
2124 }
2125
2126 bool LibraryCallKit::inline_math_multiplyHigh() {
2127 set_result(_gvn.transform(new MulHiLNode(argument(0), argument(2))));
2128 return true;
2129 }
2130
2131 bool LibraryCallKit::inline_math_unsignedMultiplyHigh() {
2132 set_result(_gvn.transform(new UMulHiLNode(argument(0), argument(2))));
2133 return true;
2134 }
2135
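//--------------------------classify_unsafe_addr-------------------------------
// Classify an Unsafe (base, offset) address: Type::OopPtr for a known on-heap
// access, Type::RawPtr for a known off-heap access (rewriting base/offset for
// the null+long form), and Type::AnyPtr when the target cannot be determined.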
2136 inline int
2137 LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset, BasicType type) {
2138 const TypePtr* base_type = TypePtr::NULL_PTR;
2139 if (base != nullptr) base_type = _gvn.type(base)->isa_ptr();
2140 if (base_type == nullptr) {
2141 // Unknown type.
2142 return Type::AnyPtr;
2143 } else if (_gvn.type(base->uncast()) == TypePtr::NULL_PTR) {
2144 // Since this is a null+long form, we have to switch to a rawptr.
2145 base = _gvn.transform(new CastX2PNode(offset));
2146 offset = MakeConX(0);
2147 return Type::RawPtr;
2148 } else if (base_type->base() == Type::RawPtr) {
2149 return Type::RawPtr;
2150 } else if (base_type->isa_oopptr()) {
2151 // Base is never null => always a heap address.
2152 if (!TypePtr::NULL_PTR->higher_equal(base_type)) {
2153 return Type::OopPtr;
2154 }
2155 // Offset is small => always a heap address.
2156 const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2157 if (offset_type != nullptr &&
2158 base_type->offset() == 0 && // (should always be?)
2159 offset_type->_lo >= 0 &&
2160 !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2161 return Type::OopPtr;
2162 } else if (type == T_OBJECT) {
      // Off-heap access to an oop doesn't make any sense; it has to be
      // on-heap.
2165 return Type::OopPtr;
2166 }
2167 // Otherwise, it might either be oop+off or null+addr.
2168 return Type::AnyPtr;
2169 } else {
2170 // No information:
2171 return Type::AnyPtr;
2172 }
2173 }
2174
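//--------------------------make_unsafe_address--------------------------------
// Build the address expression for an Unsafe access. For an ambiguous AnyPtr
// address, profiling speculation (when can_cast) may commit the access to the
// on-heap or off-heap form; otherwise fall back to a raw memory access.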
2175 Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, BasicType type, bool can_cast) {
2176 Node* uncasted_base = base;
2177 int kind = classify_unsafe_addr(uncasted_base, offset, type);
2178 if (kind == Type::RawPtr) {
2179 return basic_plus_adr(top(), uncasted_base, offset);
2180 } else if (kind == Type::AnyPtr) {
2181 assert(base == uncasted_base, "unexpected base change");
2182 if (can_cast) {
2183 if (!_gvn.type(base)->speculative_maybe_null() &&
2184 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
2185 // According to profiling, this access is always on
2186 // heap. Casting the base to not null and thus avoiding membars
2187 // around the access should allow better optimizations
2188 Node* null_ctl = top();
2189 base = null_check_oop(base, &null_ctl, true, true, true);
2190 assert(null_ctl->is_top(), "no null control here");
2191 return basic_plus_adr(base, offset);
2192 } else if (_gvn.type(base)->speculative_always_null() &&
2193 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
2194 // According to profiling, this access is always off
2195 // heap.
2196 base = null_assert(base);
2197 Node* raw_base = _gvn.transform(new CastX2PNode(offset));
2198 offset = MakeConX(0);
2199 return basic_plus_adr(top(), raw_base, offset);
2200 }
2201 }
    // We don't know whether it's an on-heap or off-heap access. Fall back
    // to a raw memory access.
2204 Node* raw = _gvn.transform(new CheckCastPPNode(control(), base, TypeRawPtr::BOTTOM));
2205 return basic_plus_adr(top(), raw, offset);
2206 } else {
2207 assert(base == uncasted_base, "unexpected base change");
2208 // We know it's an on heap access so base can't be null
2209 if (TypePtr::NULL_PTR->higher_equal(_gvn.type(base))) {
2210 base = must_be_not_null(base, true);
2211 }
2212 return basic_plus_adr(base, offset);
2213 }
2214 }
2215
2216 //--------------------------inline_number_methods-----------------------------
2217 // inline int Integer.numberOfLeadingZeros(int)
2218 // inline int Long.numberOfLeadingZeros(long)
2219 //
2220 // inline int Integer.numberOfTrailingZeros(int)
2221 // inline int Long.numberOfTrailingZeros(long)
2222 //
2223 // inline int Integer.bitCount(int)
2224 // inline int Long.bitCount(long)
2225 //
2226 // inline char Character.reverseBytes(char)
2227 // inline short Short.reverseBytes(short)
2228 // inline int Integer.reverseBytes(int)
2229 // inline long Long.reverseBytes(long)
2230 bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
2231 Node* arg = argument(0);
2232 Node* n = nullptr;
2233 switch (id) {
2234 case vmIntrinsics::_numberOfLeadingZeros_i: n = new CountLeadingZerosINode( arg); break;
2235 case vmIntrinsics::_numberOfLeadingZeros_l: n = new CountLeadingZerosLNode( arg); break;
2236 case vmIntrinsics::_numberOfTrailingZeros_i: n = new CountTrailingZerosINode(arg); break;
2237 case vmIntrinsics::_numberOfTrailingZeros_l: n = new CountTrailingZerosLNode(arg); break;
2238 case vmIntrinsics::_bitCount_i: n = new PopCountINode( arg); break;
2239 case vmIntrinsics::_bitCount_l: n = new PopCountLNode( arg); break;
2240 case vmIntrinsics::_reverseBytes_c: n = new ReverseBytesUSNode( arg); break;
2241 case vmIntrinsics::_reverseBytes_s: n = new ReverseBytesSNode( arg); break;
2242 case vmIntrinsics::_reverseBytes_i: n = new ReverseBytesINode( arg); break;
2243 case vmIntrinsics::_reverseBytes_l: n = new ReverseBytesLNode( arg); break;
2244 case vmIntrinsics::_reverse_i: n = new ReverseINode( arg); break;
2245 case vmIntrinsics::_reverse_l: n = new ReverseLNode( arg); break;
2246 default: fatal_unexpected_iid(id); break;
2247 }
2248 set_result(_gvn.transform(n));
2249 return true;
2250 }
2251
2252 //--------------------------inline_bitshuffle_methods-----------------------------
2253 // inline int Integer.compress(int, int)
2254 // inline int Integer.expand(int, int)
2255 // inline long Long.compress(long, long)
2256 // inline long Long.expand(long, long)
2257 bool LibraryCallKit::inline_bitshuffle_methods(vmIntrinsics::ID id) {
2258 Node* n = nullptr;
2259 switch (id) {
2260 case vmIntrinsics::_compress_i: n = new CompressBitsNode(argument(0), argument(1), TypeInt::INT); break;
2261 case vmIntrinsics::_expand_i: n = new ExpandBitsNode(argument(0), argument(1), TypeInt::INT); break;
2262 case vmIntrinsics::_compress_l: n = new CompressBitsNode(argument(0), argument(2), TypeLong::LONG); break;
2263 case vmIntrinsics::_expand_l: n = new ExpandBitsNode(argument(0), argument(2), TypeLong::LONG); break;
2264 default: fatal_unexpected_iid(id); break;
2265 }
2266 set_result(_gvn.transform(n));
2267 return true;
2268 }
2269
2270 //--------------------------inline_number_methods-----------------------------
2271 // inline int Integer.compareUnsigned(int, int)
2272 // inline int Long.compareUnsigned(long, long)
2273 bool LibraryCallKit::inline_compare_unsigned(vmIntrinsics::ID id) {
2274 Node* arg1 = argument(0);
2275 Node* arg2 = (id == vmIntrinsics::_compareUnsigned_l) ? argument(2) : argument(1);
2276 Node* n = nullptr;
2277 switch (id) {
2278 case vmIntrinsics::_compareUnsigned_i: n = new CmpU3Node(arg1, arg2); break;
2279 case vmIntrinsics::_compareUnsigned_l: n = new CmpUL3Node(arg1, arg2); break;
2280 default: fatal_unexpected_iid(id); break;
2281 }
2282 set_result(_gvn.transform(n));
2283 return true;
2284 }
2285
2286 //--------------------------inline_unsigned_divmod_methods-----------------------------
2287 // inline int Integer.divideUnsigned(int, int)
2288 // inline int Integer.remainderUnsigned(int, int)
2289 // inline long Long.divideUnsigned(long, long)
2290 // inline long Long.remainderUnsigned(long, long)
2291 bool LibraryCallKit::inline_divmod_methods(vmIntrinsics::ID id) {
2292 Node* n = nullptr;
2293 switch (id) {
2294 case vmIntrinsics::_divideUnsigned_i: {
2295 zero_check_int(argument(1));
      // Compile-time detection of a zero divisor (throws ArithmeticException).
2297 if (stopped()) {
2298 return true; // keep the graph constructed so far
2299 }
2300 n = new UDivINode(control(), argument(0), argument(1));
2301 break;
2302 }
2303 case vmIntrinsics::_divideUnsigned_l: {
2304 zero_check_long(argument(2));
      // Compile-time detection of a zero divisor (throws ArithmeticException).
2306 if (stopped()) {
2307 return true; // keep the graph constructed so far
2308 }
2309 n = new UDivLNode(control(), argument(0), argument(2));
2310 break;
2311 }
2312 case vmIntrinsics::_remainderUnsigned_i: {
2313 zero_check_int(argument(1));
      // Compile-time detection of a zero divisor (throws ArithmeticException).
2315 if (stopped()) {
2316 return true; // keep the graph constructed so far
2317 }
2318 n = new UModINode(control(), argument(0), argument(1));
2319 break;
2320 }
2321 case vmIntrinsics::_remainderUnsigned_l: {
2322 zero_check_long(argument(2));
      // Compile-time detection of a zero divisor (throws ArithmeticException).
2324 if (stopped()) {
2325 return true; // keep the graph constructed so far
2326 }
2327 n = new UModLNode(control(), argument(0), argument(2));
2328 break;
2329 }
2330 default: fatal_unexpected_iid(id); break;
2331 }
2332 set_result(_gvn.transform(n));
2333 return true;
2334 }
2335
2336 //----------------------------inline_unsafe_access----------------------------
2337
2338 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2339 // Attempt to infer a sharper value type from the offset and base type.
2340 ciKlass* sharpened_klass = nullptr;
2341
2342 // See if it is an instance field, with an object type.
2343 if (alias_type->field() != nullptr) {
2344 if (alias_type->field()->type()->is_klass()) {
2345 sharpened_klass = alias_type->field()->type()->as_klass();
2346 }
2347 }
2348
2349 const TypeOopPtr* result = nullptr;
2350 // See if it is a narrow oop array.
2351 if (adr_type->isa_aryptr()) {
2352 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2353 const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2354 if (elem_type != nullptr && elem_type->is_loaded()) {
2355 // Sharpen the value type.
2356 result = elem_type;
2357 }
2358 }
2359 }
2360
  // The sharpened class might be unloaded if there is no class loader
  // constraint in place.
2363 if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2364 // Sharpen the value type.
2365 result = TypeOopPtr::make_from_klass(sharpened_klass);
2366 }
2367 if (result != nullptr) {
2368 #ifndef PRODUCT
2369 if (C->print_intrinsics() || C->print_inlining()) {
2370 tty->print(" from base type: "); adr_type->dump(); tty->cr();
2371 tty->print(" sharpened value: "); result->dump(); tty->cr();
2372 }
2373 #endif
2374 }
2375 return result;
2376 }
2377
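// Map an Unsafe AccessKind to the C2 memory-ordering decorator. Note the
// naming offset: a plain ('Relaxed') access maps to MO_UNORDERED, while an
// 'Opaque' access maps to MO_RELAXED (coherence, but no ordering).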
2378 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2379 switch (kind) {
2380 case Relaxed:
2381 return MO_UNORDERED;
2382 case Opaque:
2383 return MO_RELAXED;
2384 case Acquire:
2385 return MO_ACQUIRE;
2386 case Release:
2387 return MO_RELEASE;
2388 case Volatile:
2389 return MO_SEQ_CST;
2390 default:
2391 ShouldNotReachHere();
2392 return 0;
2393 }
2394 }
2395
2396 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2397 if (callee()->is_static()) return false; // caller must have the capability!
2398 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2399 guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2400 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2401 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2402
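  // Unsafe cannot tell the reference strength of the accessed location
  // (e.g. Reference.referent), so GC barriers must treat the oop
  // conservatively.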
2403 if (is_reference_type(type)) {
2404 decorators |= ON_UNKNOWN_OOP_REF;
2405 }
2406
2407 if (unaligned) {
2408 decorators |= C2_UNALIGNED;
2409 }
2410
2411 #ifndef PRODUCT
2412 {
2413 ResourceMark rm;
2414 // Check the signatures.
2415 ciSignature* sig = callee()->signature();
2416 #ifdef ASSERT
2417 if (!is_store) {
2418 // Object getReference(Object base, int/long offset), etc.
2419 BasicType rtype = sig->return_type()->basic_type();
2420 assert(rtype == type, "getter must return the expected value");
2421 assert(sig->count() == 2, "oop getter has 2 arguments");
2422 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2423 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2424 } else {
2425 // void putReference(Object base, int/long offset, Object x), etc.
2426 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2427 assert(sig->count() == 3, "oop putter has 3 arguments");
2428 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2429 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2430 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2431 assert(vtype == type, "putter must accept the expected value");
2432 }
2433 #endif // ASSERT
2434 }
2435 #endif //PRODUCT
2436
2437 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2438
2439 Node* receiver = argument(0); // type: oop
2440
2441 // Build address expression.
2442 Node* heap_base_oop = top();
2443
2444 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2445 Node* base = argument(1); // type: oop
2446 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2447 Node* offset = argument(2); // type: long
2448 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2449 // to be plain byte offsets, which are also the same as those accepted
2450 // by oopDesc::field_addr.
2451 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2452 "fieldOffset must be byte-scaled");
2453 // 32-bit machines ignore the high half!
2454 offset = ConvL2X(offset);
2455
2456 // Save state and restore on bailout
2457 SavedState old_state(this);
2458
2459 Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2460 assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2461
2462 if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2463 if (type != T_OBJECT) {
2464 decorators |= IN_NATIVE; // off-heap primitive access
2465 } else {
2466 return false; // off-heap oop accesses are not supported
2467 }
2468 } else {
2469 heap_base_oop = base; // on-heap or mixed access
2470 }
2471
2472 // Can base be null? Otherwise, always on-heap access.
2473 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2474
2475 if (!can_access_non_heap) {
2476 decorators |= IN_HEAP;
2477 }
2478
2479 Node* val = is_store ? argument(4) : nullptr;
2480
2481 const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2482 if (adr_type == TypePtr::NULL_PTR) {
2483 return false; // off-heap access with zero address
2484 }
2485
2486 // Try to categorize the address.
2487 Compile::AliasType* alias_type = C->alias_type(adr_type);
2488 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2489
2490 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2491 alias_type->adr_type() == TypeAryPtr::RANGE) {
2492 return false; // not supported
2493 }
2494
2495 bool mismatched = false;
2496 BasicType bt = alias_type->basic_type();
2497 if (bt != T_ILLEGAL) {
2498 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2499 if (bt == T_BYTE && adr_type->isa_aryptr()) {
      // Alias type doesn't differentiate between byte[] and boolean[].
2501 // Use address type to get the element type.
2502 bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2503 }
2504 if (is_reference_type(bt, true)) {
2505 // accessing an array field with getReference is not a mismatch
2506 bt = T_OBJECT;
2507 }
2508 if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2509 // Don't intrinsify mismatched object accesses
2510 return false;
2511 }
2512 mismatched = (bt != type);
2513 } else if (alias_type->adr_type()->isa_oopptr()) {
2514 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2515 }
2516
2517 old_state.discard();
2518 assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2519
2520 if (mismatched) {
2521 decorators |= C2_MISMATCHED;
2522 }
2523
2524 // First guess at the value type.
2525 const Type *value_type = Type::get_const_basic_type(type);
2526
2527 // Figure out the memory ordering.
2528 decorators |= mo_decorator_for_access_kind(kind);
2529
2530 if (!is_store && type == T_OBJECT) {
2531 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2532 if (tjp != nullptr) {
2533 value_type = tjp;
2534 }
2535 }
2536
2537 receiver = null_check(receiver);
2538 if (stopped()) {
2539 return true;
2540 }
2541 // Heap pointers get a null-check from the interpreter,
2542 // as a courtesy. However, this is not guaranteed by Unsafe,
2543 // and it is not possible to fully distinguish unintended nulls
2544 // from intended ones in this API.
2545
2546 if (!is_store) {
2547 Node* p = nullptr;
2548 // Try to constant fold a load from a constant field
2549 ciField* field = alias_type->field();
2550 if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) {
2551 // final or stable field
2552 p = make_constant_from_field(field, heap_base_oop);
2553 }
2554
2555 if (p == nullptr) { // Could not constant fold the load
2556 p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2557 // Normalize the value returned by getBoolean in the following cases
2558 if (type == T_BOOLEAN &&
2559 (mismatched ||
2560 heap_base_oop == top() || // - heap_base_oop is null or
2561 (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2562 // and the unsafe access is made to large offset
2563 // (i.e., larger than the maximum offset necessary for any
2564 // field access)
2565 ) {
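        // The IdealKit block below materializes, in effect:
        //   p = (p != 0) ? 1 : 0;
        // so a boolean loaded from arbitrary memory can only observe 0 or 1.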
        IdealKit ideal = IdealKit(this);
#define __ ideal.
        IdealVariable normalized_result(ideal);
        __ declarations_done();
        __ set(normalized_result, p);
        __ if_then(p, BoolTest::ne, ideal.ConI(0));
          __ set(normalized_result, ideal.ConI(1));
        ideal.end_if();
        final_sync(ideal);
        p = __ value(normalized_result);
#undef __
      }
    }
    if (type == T_ADDRESS) {
      p = gvn().transform(new CastP2XNode(nullptr, p));
      p = ConvX2UL(p);
    }
    // The load node has the control of the preceding MemBarCPUOrder.  All
    // following nodes will have the control of the MemBarCPUOrder inserted at
    // the end of this method.  So, pushing the load onto the stack at a later
    // point is fine.
    set_result(p);
  } else {
    if (bt == T_ADDRESS) {
      // Repackage the long as a pointer.
      val = ConvL2X(val);
      val = gvn().transform(new CastX2PNode(val));
    }
    access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
  }

  return true;
}

//----------------------------inline_unsafe_load_store----------------------------
// This method serves a couple of different customers (depending on LoadStoreKind):
//
// LS_cmp_swap:
//
//   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
//   boolean compareAndSetInt(      Object o, long offset, int    expected, int    x);
//   boolean compareAndSetLong(     Object o, long offset, long   expected, long   x);
//
// LS_cmp_swap_weak:
//
//   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
//   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
//   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
//   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
//
//   boolean weakCompareAndSetInt(             Object o, long offset, int    expected, int    x);
//   boolean weakCompareAndSetIntPlain(        Object o, long offset, int    expected, int    x);
//   boolean weakCompareAndSetIntAcquire(      Object o, long offset, int    expected, int    x);
//   boolean weakCompareAndSetIntRelease(      Object o, long offset, int    expected, int    x);
//
//   boolean weakCompareAndSetLong(            Object o, long offset, long   expected, long   x);
//   boolean weakCompareAndSetLongPlain(       Object o, long offset, long   expected, long   x);
//   boolean weakCompareAndSetLongAcquire(     Object o, long offset, long   expected, long   x);
//   boolean weakCompareAndSetLongRelease(     Object o, long offset, long   expected, long   x);
//
// LS_cmp_exchange:
//
//   Object compareAndExchangeReferenceVolatile(Object o, long offset, Object expected, Object x);
//   Object compareAndExchangeReferenceAcquire( Object o, long offset, Object expected, Object x);
//   Object compareAndExchangeReferenceRelease( Object o, long offset, Object expected, Object x);
//
//   Object compareAndExchangeIntVolatile(      Object o, long offset, Object expected, Object x);
//   Object compareAndExchangeIntAcquire(       Object o, long offset, Object expected, Object x);
//   Object compareAndExchangeIntRelease(       Object o, long offset, Object expected, Object x);
//
//   Object compareAndExchangeLongVolatile(     Object o, long offset, Object expected, Object x);
//   Object compareAndExchangeLongAcquire(      Object o, long offset, Object expected, Object x);
//   Object compareAndExchangeLongRelease(      Object o, long offset, Object expected, Object x);
//
// LS_get_add:
//
//   int  getAndAddInt( Object o, long offset, int  delta)
//   long getAndAddLong(Object o, long offset, long delta)
//
// LS_get_set:
//
//   int    getAndSet(Object o, long offset, int    newValue)
//   long   getAndSet(Object o, long offset, long   newValue)
//   Object getAndSet(Object o, long offset, Object newValue)
//
bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadStoreKind kind, const AccessKind access_kind) {
  // This basic scheme here is the same as inline_unsafe_access, but
  // differs in enough details that combining them would make the code
  // overly confusing.  (This is a true fact! I originally combined
  // them, but even I was confused by it!) As much code/comments as
  // possible are retained from inline_unsafe_access though to make
  // the correspondences clearer. - dl

  if (callee()->is_static())  return false;  // caller must have the capability!

  DecoratorSet decorators = C2_UNSAFE_ACCESS;
  decorators |= mo_decorator_for_access_kind(access_kind);

#ifndef PRODUCT
  BasicType rtype;
  {
    ResourceMark rm;
    // Check the signatures.
    ciSignature* sig = callee()->signature();
    rtype = sig->return_type()->basic_type();
    switch(kind) {
      case LS_get_add:
      case LS_get_set: {
        // Check the signatures.
#ifdef ASSERT
        assert(rtype == type, "get and set must return the expected type");
        assert(sig->count() == 3, "get and set has 3 arguments");
        assert(sig->type_at(0)->basic_type() == T_OBJECT, "get and set base is object");
        assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
        assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
        assert(access_kind == Volatile, "mo is not passed to intrinsic nodes in current implementation");
#endif // ASSERT
        break;
      }
      case LS_cmp_swap:
      case LS_cmp_swap_weak: {
        // Check the signatures.
#ifdef ASSERT
        assert(rtype == T_BOOLEAN, "CAS must return boolean");
        assert(sig->count() == 4, "CAS has 4 arguments");
        assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
        assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
#endif // ASSERT
        break;
      }
      case LS_cmp_exchange: {
        // Check the signatures.
#ifdef ASSERT
        assert(rtype == type, "CAS must return the expected type");
        assert(sig->count() == 4, "CAS has 4 arguments");
        assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
        assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
#endif // ASSERT
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
#endif //PRODUCT

  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".

  // Get arguments:
  Node* receiver = nullptr;
  Node* base     = nullptr;
  Node* offset   = nullptr;
  Node* oldval   = nullptr;
  Node* newval   = nullptr;
  switch(kind) {
    case LS_cmp_swap:
    case LS_cmp_swap_weak:
    case LS_cmp_exchange: {
      const bool two_slot_type = type2size[type] == 2;
      receiver = argument(0);  // type: oop
      base     = argument(1);  // type: oop
      offset   = argument(2);  // type: long
      oldval   = argument(4);  // type: oop, int, or long
      newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
      break;
    }
    case LS_get_add:
    case LS_get_set: {
      receiver = argument(0);  // type: oop
      base     = argument(1);  // type: oop
      offset   = argument(2);  // type: long
      oldval   = nullptr;
      newval   = argument(4);  // type: oop, int, or long
      break;
    }
    default:
      ShouldNotReachHere();
  }
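
  // Note: 'oldval' is read from argument slot 4 because the long 'offset'
  // occupies two stack slots (2 and 3); for two-slot (long) values 'newval'
  // then starts at slot 6, otherwise at slot 5.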

  // Build field offset expression.
  // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
  // to be plain byte offsets, which are also the same as those accepted
  // by oopDesc::field_addr.
  assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
  // 32-bit machines ignore the high half of long offsets
  offset = ConvL2X(offset);
  // Save state and restore on bailout
  SavedState old_state(this);
  Node* adr = make_unsafe_address(base, offset, type, false);
  const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();

  Compile::AliasType* alias_type = C->alias_type(adr_type);
  BasicType bt = alias_type->basic_type();
  if (bt != T_ILLEGAL &&
      (is_reference_type(bt) != (type == T_OBJECT))) {
    // Don't intrinsify mismatched object accesses.
    return false;
  }

  old_state.discard();

  // For CAS, unlike inline_unsafe_access, there seems no point in
  // trying to refine types. Just use the coarse types here.
  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
  const Type* value_type = Type::get_const_basic_type(type);

  switch (kind) {
    case LS_get_set:
    case LS_cmp_exchange: {
      if (type == T_OBJECT) {
        const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
        if (tjp != nullptr) {
          value_type = tjp;
        }
      }
      break;
    }
    case LS_cmp_swap:
    case LS_cmp_swap_weak:
    case LS_get_add:
      break;
    default:
      ShouldNotReachHere();
  }

  // Null check receiver.
  receiver = null_check(receiver);
  if (stopped()) {
    return true;
  }

  int alias_idx = C->get_alias_index(adr_type);

  if (is_reference_type(type)) {
    decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;

    // Transformation of a value which could be null pointer (CastPP #null)
    // could be delayed during Parse (for example, in adjust_map_after_if()).
    // Execute transformation here to avoid barrier generation in such case.
    if (_gvn.type(newval) == TypePtr::NULL_PTR)
      newval = _gvn.makecon(TypePtr::NULL_PTR);

    if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
      // Refine the value to a null constant, when it is known to be null
      oldval = _gvn.makecon(TypePtr::NULL_PTR);
    }
  }

  Node* result = nullptr;
  switch (kind) {
    case LS_cmp_exchange: {
      result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
                                            oldval, newval, value_type, type, decorators);
      break;
    }
    case LS_cmp_swap_weak:
      decorators |= C2_WEAK_CMPXCHG;
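      // fall through: a weak CAS shares the boolean-returning cmpxchg path
      // below, differing only by the C2_WEAK_CMPXCHG decorator.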
    case LS_cmp_swap: {
      result = access_atomic_cmpxchg_bool_at(base, adr, adr_type, alias_idx,
                                             oldval, newval, value_type, type, decorators);
      break;
    }
    case LS_get_set: {
      result = access_atomic_xchg_at(base, adr, adr_type, alias_idx,
                                     newval, value_type, type, decorators);
      break;
    }
    case LS_get_add: {
      result = access_atomic_add_at(base, adr, adr_type, alias_idx,
                                    newval, value_type, type, decorators);
      break;
    }
    default:
      ShouldNotReachHere();
  }

  assert(type2size[result->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
  set_result(result);
  return true;
}

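//----------------------------inline_unsafe_fence------------------------------
// Models the Unsafe fence intrinsics (loadFence, storeFence, storeStoreFence,
// fullFence) by emitting the corresponding memory-barrier nodes.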
bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
  // Regardless of form, don't allow previous ld/st to move down,
  // then issue acquire, release, or volatile mem_bar.
  insert_mem_bar(Op_MemBarCPUOrder);
  switch(id) {
    case vmIntrinsics::_loadFence:
      insert_mem_bar(Op_LoadFence);
      return true;
    case vmIntrinsics::_storeFence:
      insert_mem_bar(Op_StoreFence);
      return true;
    case vmIntrinsics::_storeStoreFence:
      insert_mem_bar(Op_StoreStoreFence);
      return true;
    case vmIntrinsics::_fullFence:
      insert_mem_bar(Op_MemBarFull);
      return true;
    default:
      fatal_unexpected_iid(id);
      return false;
  }
}

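//----------------------------inline_onspinwait--------------------------------
// Models Thread.onSpinWait() as an OnSpinWait node, which a backend may lower
// to a spin-loop hint (e.g. PAUSE on x86).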
bool LibraryCallKit::inline_onspinwait() {
  insert_mem_bar(Op_OnSpinWait);
  return true;
}

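// Returns false only when 'kls' is a constant pointing to an instance klass
// that is already initialized; in that case the allocation fast path can
// safely skip the init-state guard built in inline_unsafe_allocate().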
bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
  if (!kls->is_Con()) {
    return true;
  }
  const TypeInstKlassPtr* klsptr = kls->bottom_type()->isa_instklassptr();
  if (klsptr == nullptr) {
    return true;
  }
  ciInstanceKlass* ik = klsptr->instance_klass();
  // don't need a guard for a klass that is already initialized
  return !ik->is_initialized();
}

//----------------------------inline_unsafe_writeback0-------------------------
// public native void Unsafe.writeback0(long address)
bool LibraryCallKit::inline_unsafe_writeback0() {
  if (!Matcher::has_match_rule(Op_CacheWB)) {
    return false;
  }
#ifndef PRODUCT
  assert(Matcher::has_match_rule(Op_CacheWBPreSync), "found match rule for CacheWB but not CacheWBPreSync");
  assert(Matcher::has_match_rule(Op_CacheWBPostSync), "found match rule for CacheWB but not CacheWBPostSync");
  ciSignature* sig = callee()->signature();
  assert(sig->type_at(0)->basic_type() == T_LONG, "Unsafe_writeback0 address is long!");
#endif
  null_check_receiver();  // null-check, then ignore
  Node* addr = argument(1);
  addr = new CastX2PNode(addr);
  addr = _gvn.transform(addr);
  Node* flush = new CacheWBNode(control(), memory(TypeRawPtr::BOTTOM), addr);
  flush = _gvn.transform(flush);
  set_memory(flush, TypeRawPtr::BOTTOM);
  return true;
}

//----------------------------inline_unsafe_writebackSync0---------------------
// native void Unsafe.writebackPreSync0();
// native void Unsafe.writebackPostSync0();
bool LibraryCallKit::inline_unsafe_writebackSync0(bool is_pre) {
  if (is_pre && !Matcher::has_match_rule(Op_CacheWBPreSync)) {
    return false;
  }
  if (!is_pre && !Matcher::has_match_rule(Op_CacheWBPostSync)) {
    return false;
  }
#ifndef PRODUCT
  assert(Matcher::has_match_rule(Op_CacheWB),
         (is_pre ? "found match rule for CacheWBPreSync but not CacheWB"
                 : "found match rule for CacheWBPostSync but not CacheWB"));

#endif
  null_check_receiver();  // null-check, then ignore
  Node* sync;
  if (is_pre) {
    sync = new CacheWBPreSyncNode(control(), memory(TypeRawPtr::BOTTOM));
  } else {
    sync = new CacheWBPostSyncNode(control(), memory(TypeRawPtr::BOTTOM));
  }
  sync = _gvn.transform(sync);
  set_memory(sync, TypeRawPtr::BOTTOM);
  return true;
}

//----------------------------inline_unsafe_allocate---------------------------
// public native Object Unsafe.allocateInstance(Class<?> cls);
bool LibraryCallKit::inline_unsafe_allocate() {

#if INCLUDE_JVMTI
  if (too_many_traps(Deoptimization::Reason_intrinsic)) {
    return false;
  }
#endif //INCLUDE_JVMTI

  if (callee()->is_static())  return false;  // caller must have the capability!

  null_check_receiver();  // null-check, then ignore
  Node* cls = null_check(argument(1));
  if (stopped())  return true;

  Node* kls = load_klass_from_mirror(cls, false, nullptr, 0);
  kls = null_check(kls);
  if (stopped())  return true;  // argument was like int.class

#if INCLUDE_JVMTI
  // Don't try to access the newly allocated object in the intrinsic.
  // Doing so causes performance issues even when the JVMTI VMObjectAlloc
  // event is disabled. Deoptimize and allocate in the interpreter instead.
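  // Guard sketch for the cutout below:
  //   if (JvmtiExport::_should_notify_object_alloc != 0)  deoptimize;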
  Node* addr = makecon(TypeRawPtr::make((address) &JvmtiExport::_should_notify_object_alloc));
  Node* should_post_vm_object_alloc = make_load(this->control(), addr, TypeInt::INT, T_INT, MemNode::unordered);
  Node* chk = _gvn.transform(new CmpINode(should_post_vm_object_alloc, intcon(0)));
  Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq));
  {
    BuildCutout unless(this, tst, PROB_MAX);
    uncommon_trap(Deoptimization::Reason_intrinsic,
                  Deoptimization::Action_make_not_entrant);
  }
  if (stopped()) {
    return true;
  }
#endif //INCLUDE_JVMTI

  Node* test = nullptr;
  if (LibraryCallKit::klass_needs_init_guard(kls)) {
    // Note:  The argument might still be an illegal value like
    // Serializable.class or Object[].class.  The runtime will handle it.
    // But we must make an explicit check for initialization.
    Node* insp = basic_plus_adr(top(), kls, in_bytes(InstanceKlass::init_state_offset()));
    // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
    // can generate code to load it as unsigned byte.
    Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
    Node* bits = intcon(InstanceKlass::fully_initialized);
    test = _gvn.transform(new SubINode(inst, bits));
    // The 'test' is non-zero if we need to take a slow path.
  }

  Node* obj = new_instance(kls, test);
  set_result(obj);
  return true;
}

//------------------------inline_native_time_funcs--------------
// inline code for System.currentTimeMillis() and System.nanoTime()
// these have the same type and signature
bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
  const TypeFunc* tf = OptoRuntime::void_long_Type();
  const TypePtr* no_memory_effects = nullptr;
  Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
  Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
#ifdef ASSERT
  Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
  assert(value_top == top(), "second value must be top");
#endif
  set_result(value);
  return true;
}

//--------------------inline_native_vthread_start_transition--------------------
// inline void startTransition(boolean is_mount);
// inline void startFinalTransition();
// Pseudocode of implementation:
//
//   java_lang_Thread::set_is_in_vthread_transition(vthread, true);
//   carrier->set_is_in_vthread_transition(true);
//   OrderAccess::storeload();
//   int disable_requests = java_lang_Thread::vthread_transition_disable_count(vthread)
//                          + global_vthread_transition_disable_count();
//   if (disable_requests > 0) {
//     slow path: runtime call
//   }
bool LibraryCallKit::inline_native_vthread_start_transition(address funcAddr, const char* funcName, bool is_final_transition) {
  Node* vt_oop = _gvn.transform(must_be_not_null(argument(0), true));  // VirtualThread this argument
  IdealKit ideal(this);

  Node* thread = ideal.thread();
  Node* jt_addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::is_in_vthread_transition_offset()));
  Node* vt_addr = basic_plus_adr(vt_oop, java_lang_Thread::is_in_vthread_transition_offset());
  access_store_at(nullptr, jt_addr, _gvn.type(jt_addr)->is_ptr(), ideal.ConI(1), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
  access_store_at(nullptr, vt_addr, _gvn.type(vt_addr)->is_ptr(), ideal.ConI(1), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
  insert_mem_bar(Op_MemBarStoreLoad);
  ideal.sync_kit(this);

  Node* global_disable_addr = makecon(TypeRawPtr::make((address)MountUnmountDisabler::global_vthread_transition_disable_count_address()));
  Node* global_disable = ideal.load(ideal.ctrl(), global_disable_addr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, true /*require_atomic_access*/);
  Node* vt_disable_addr = basic_plus_adr(vt_oop, java_lang_Thread::vthread_transition_disable_count_offset());
  const TypePtr* vt_disable_addr_t = _gvn.type(vt_disable_addr)->is_ptr();
  Node* vt_disable = ideal.load(ideal.ctrl(), vt_disable_addr, TypeInt::INT, T_INT, C->get_alias_index(vt_disable_addr_t), true /*require_atomic_access*/);
  Node* disabled = _gvn.transform(new AddINode(global_disable, vt_disable));

  ideal.if_then(disabled, BoolTest::ne, ideal.ConI(0)); {
    sync_kit(ideal);
    Node* is_mount = is_final_transition ? ideal.ConI(0) : _gvn.transform(argument(1));
    const TypeFunc* tf = OptoRuntime::vthread_transition_Type();
    make_runtime_call(RC_NO_LEAF, tf, funcAddr, funcName, TypePtr::BOTTOM, vt_oop, is_mount);
    ideal.sync_kit(this);
  }
  ideal.end_if();

  final_sync(ideal);
  return true;
}

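//--------------------inline_native_vthread_end_transition--------------------
// Pseudocode of implementation (the inverse of start_transition above):
//
//   if (notify_jvmti_events) {
//     slow path: runtime call
//   } else {
//     carrier->set_is_in_vthread_transition(false);
//     java_lang_Thread::set_is_in_vthread_transition(vthread, false);
//   }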
bool LibraryCallKit::inline_native_vthread_end_transition(address funcAddr, const char* funcName, bool is_first_transition) {
  Node* vt_oop = _gvn.transform(must_be_not_null(argument(0), true));  // VirtualThread this argument
  IdealKit ideal(this);

  Node* _notify_jvmti_addr = makecon(TypeRawPtr::make((address)MountUnmountDisabler::notify_jvmti_events_address()));
  Node* _notify_jvmti = ideal.load(ideal.ctrl(), _notify_jvmti_addr, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw);

  ideal.if_then(_notify_jvmti, BoolTest::eq, ideal.ConI(1)); {
    sync_kit(ideal);
    Node* is_mount = is_first_transition ? ideal.ConI(1) : _gvn.transform(argument(1));
    const TypeFunc* tf = OptoRuntime::vthread_transition_Type();
    make_runtime_call(RC_NO_LEAF, tf, funcAddr, funcName, TypePtr::BOTTOM, vt_oop, is_mount);
    ideal.sync_kit(this);
  } ideal.else_(); {
    Node* thread = ideal.thread();
    Node* jt_addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::is_in_vthread_transition_offset()));
    Node* vt_addr = basic_plus_adr(vt_oop, java_lang_Thread::is_in_vthread_transition_offset());

    sync_kit(ideal);
    access_store_at(nullptr, jt_addr, _gvn.type(jt_addr)->is_ptr(), ideal.ConI(0), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
    access_store_at(nullptr, vt_addr, _gvn.type(vt_addr)->is_ptr(), ideal.ConI(0), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
    ideal.sync_kit(this);
  } ideal.end_if();

  final_sync(ideal);
  return true;
}

#if INCLUDE_JVMTI

// Always update the is_disable_suspend bit.
bool LibraryCallKit::inline_native_notify_jvmti_sync() {
  if (!DoJVMTIVirtualThreadTransitions) {
    return true;
  }
  IdealKit ideal(this);

  {
    // unconditionally update the is_disable_suspend bit in current JavaThread
    Node* thread = ideal.thread();
    Node* arg = _gvn.transform(argument(0));  // argument for notification
    Node* addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::is_disable_suspend_offset()));
    const TypePtr* addr_type = _gvn.type(addr)->isa_ptr();

    sync_kit(ideal);
    access_store_at(nullptr, addr, addr_type, arg, _gvn.type(arg), T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
    ideal.sync_kit(this);
  }
  final_sync(ideal);

  return true;
}

#endif // INCLUDE_JVMTI

#ifdef JFR_HAVE_INTRINSICS

/**
 * if oop->klass != null
 *   // normal class
 *   epoch = _epoch_state ? 2 : 1
 *   if ((oop->klass->trace_id & ((epoch << META_SHIFT) | epoch)) != epoch) {
 *     ... // enter slow path when the klass is first recorded or the epoch of JFR shifts
 *   }
 *   id = oop->klass->trace_id >> TRACE_ID_SHIFT // normal class path
 * else
 *   // primitive class
 *   if oop->array_klass != null
 *     id = (oop->array_klass->trace_id >> TRACE_ID_SHIFT) + 1 // primitive class path
 *   else
 *     id = LAST_TYPE_ID + 1 // void class path
 *   if (!signaled)
 *     signaled = true
 */
bool LibraryCallKit::inline_native_classID() {
  Node* cls = argument(0);

  IdealKit ideal(this);
#define __ ideal.
  IdealVariable result(ideal); __ declarations_done();
  Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(),
                                                 basic_plus_adr(cls, java_lang_Class::klass_offset()),
                                                 TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));


  __ if_then(kls, BoolTest::ne, null()); {
    Node* kls_trace_id_addr = basic_plus_adr(kls, in_bytes(KLASS_TRACE_ID_OFFSET));
    Node* kls_trace_id_raw = ideal.load(ideal.ctrl(), kls_trace_id_addr, TypeLong::LONG, T_LONG, Compile::AliasIdxRaw);

    Node* epoch_address = makecon(TypeRawPtr::make(JfrIntrinsicSupport::epoch_address()));
    Node* epoch = ideal.load(ideal.ctrl(), epoch_address, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw);
    epoch = _gvn.transform(new LShiftLNode(longcon(1), epoch));
    Node* mask = _gvn.transform(new LShiftLNode(epoch, intcon(META_SHIFT)));
    mask = _gvn.transform(new OrLNode(mask, epoch));
    Node* kls_trace_id_raw_and_mask = _gvn.transform(new AndLNode(kls_trace_id_raw, mask));

    float unlikely = PROB_UNLIKELY(0.999);
    __ if_then(kls_trace_id_raw_and_mask, BoolTest::ne, epoch, unlikely); {
      sync_kit(ideal);
      make_runtime_call(RC_LEAF,
                        OptoRuntime::class_id_load_barrier_Type(),
                        CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::load_barrier),
                        "class id load barrier",
                        TypePtr::BOTTOM,
                        kls);
      ideal.sync_kit(this);
    } __ end_if();

    ideal.set(result, _gvn.transform(new URShiftLNode(kls_trace_id_raw, ideal.ConI(TRACE_ID_SHIFT))));
  } __ else_(); {
    Node* array_kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(),
                                                         basic_plus_adr(cls, java_lang_Class::array_klass_offset()),
                                                         TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
    __ if_then(array_kls, BoolTest::ne, null()); {
      Node* array_kls_trace_id_addr = basic_plus_adr(array_kls, in_bytes(KLASS_TRACE_ID_OFFSET));
      Node* array_kls_trace_id_raw = ideal.load(ideal.ctrl(), array_kls_trace_id_addr, TypeLong::LONG, T_LONG, Compile::AliasIdxRaw);
      Node* array_kls_trace_id = _gvn.transform(new URShiftLNode(array_kls_trace_id_raw, ideal.ConI(TRACE_ID_SHIFT)));
      ideal.set(result, _gvn.transform(new AddLNode(array_kls_trace_id, longcon(1))));
    } __ else_(); {
      // void class case
      ideal.set(result, _gvn.transform(longcon(LAST_TYPE_ID + 1)));
    } __ end_if();

    Node* signaled_flag_address = makecon(TypeRawPtr::make(JfrIntrinsicSupport::signal_address()));
    Node* signaled = ideal.load(ideal.ctrl(), signaled_flag_address, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw, true, MemNode::acquire);
    __ if_then(signaled, BoolTest::ne, ideal.ConI(1)); {
      ideal.store(ideal.ctrl(), signaled_flag_address, ideal.ConI(1), T_BOOLEAN, Compile::AliasIdxRaw, MemNode::release, true);
    } __ end_if();
  } __ end_if();

  final_sync(ideal);
  set_result(ideal.value(result));
#undef __
  return true;
}

//------------------------inline_native_jvm_commit------------------
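// A sketch of the control flow built below ('LEASE' stands for the buffer
// flag bit tested with the constant 4):
//
//   if (tl->notified) {
//     tl->notified = false;
//     return buffer->pos();      // caller uses this to reset the event writer
//   }
//   buffer->set_pos(next_position);
//   if (buffer->flags & LEASE) {
//     return_lease();            // runtime call, can safepoint
//     return 0;
//   }
//   return next_position;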
bool LibraryCallKit::inline_native_jvm_commit() {
  enum { _true_path = 1, _false_path = 2, PATH_LIMIT };

  // Save input memory and i_o state.
  Node* input_memory_state = reset_memory();
  set_all_memory(input_memory_state);
  Node* input_io_state = i_o();

  // TLS.
  Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
  // Jfr java buffer.
  Node* java_buffer_offset = _gvn.transform(new AddPNode(top(), tls_ptr, _gvn.transform(MakeConX(in_bytes(JAVA_BUFFER_OFFSET_JFR)))));
  Node* java_buffer = _gvn.transform(new LoadPNode(control(), input_memory_state, java_buffer_offset, TypePtr::BOTTOM, TypeRawPtr::NOTNULL, MemNode::unordered));
  Node* java_buffer_pos_offset = _gvn.transform(new AddPNode(top(), java_buffer, _gvn.transform(MakeConX(in_bytes(JFR_BUFFER_POS_OFFSET)))));

  // Load the current value of the notified field in the JfrThreadLocal.
  Node* notified_offset = basic_plus_adr(top(), tls_ptr, in_bytes(NOTIFY_OFFSET_JFR));
  Node* notified = make_load(control(), notified_offset, TypeInt::BOOL, T_BOOLEAN, MemNode::unordered);

  // Test for notification.
  Node* notified_cmp = _gvn.transform(new CmpINode(notified, _gvn.intcon(1)));
  Node* test_notified = _gvn.transform(new BoolNode(notified_cmp, BoolTest::eq));
  IfNode* iff_notified = create_and_map_if(control(), test_notified, PROB_MIN, COUNT_UNKNOWN);

  // True branch, is notified.
  Node* is_notified = _gvn.transform(new IfTrueNode(iff_notified));
  set_control(is_notified);

  // Reset notified state.
  store_to_memory(control(), notified_offset, _gvn.intcon(0), T_BOOLEAN, MemNode::unordered);
  Node* notified_reset_memory = reset_memory();

  // Iff notified, the return address of the commit method is the current position of the backing java buffer. This is used to reset the event writer.
  Node* current_pos_X = _gvn.transform(new LoadXNode(control(), input_memory_state, java_buffer_pos_offset, TypeRawPtr::NOTNULL, TypeX_X, MemNode::unordered));
  // Convert the machine-word to a long.
  Node* current_pos = _gvn.transform(ConvX2L(current_pos_X));

  // False branch, not notified.
  Node* not_notified = _gvn.transform(new IfFalseNode(iff_notified));
  set_control(not_notified);
  set_all_memory(input_memory_state);

  // Arg is the next position as a long.
  Node* arg = argument(0);
  // Convert long to machine-word.
  Node* next_pos_X = _gvn.transform(ConvL2X(arg));

  // Store the next_position to the underlying jfr java buffer.
  store_to_memory(control(), java_buffer_pos_offset, next_pos_X, LP64_ONLY(T_LONG) NOT_LP64(T_INT), MemNode::release);

  Node* commit_memory = reset_memory();
  set_all_memory(commit_memory);

  // Now load the flags from off the java buffer and decide if the buffer is a lease. If so, it needs to be returned post-commit.
  Node* java_buffer_flags_offset = _gvn.transform(new AddPNode(top(), java_buffer, _gvn.transform(MakeConX(in_bytes(JFR_BUFFER_FLAGS_OFFSET)))));
  Node* flags = make_load(control(), java_buffer_flags_offset, TypeInt::UBYTE, T_BYTE, MemNode::unordered);
  Node* lease_constant = _gvn.transform(_gvn.intcon(4));

  // And flags with lease constant.
  Node* lease = _gvn.transform(new AndINode(flags, lease_constant));

  // Branch on lease to conditionalize returning the leased java buffer.
  Node* lease_cmp = _gvn.transform(new CmpINode(lease, lease_constant));
  Node* test_lease = _gvn.transform(new BoolNode(lease_cmp, BoolTest::eq));
  IfNode* iff_lease = create_and_map_if(control(), test_lease, PROB_MIN, COUNT_UNKNOWN);

  // False branch, not a lease.
  Node* not_lease = _gvn.transform(new IfFalseNode(iff_lease));

  // True branch, is lease.
  Node* is_lease = _gvn.transform(new IfTrueNode(iff_lease));
  set_control(is_lease);

  // Make a runtime call, which can safepoint, to return the leased buffer. This updates both the JfrThreadLocal and the Java event writer oop.
  Node* call_return_lease = make_runtime_call(RC_NO_LEAF,
                                              OptoRuntime::void_void_Type(),
                                              SharedRuntime::jfr_return_lease(),
                                              "return_lease", TypePtr::BOTTOM);
  Node* call_return_lease_control = _gvn.transform(new ProjNode(call_return_lease, TypeFunc::Control));

  RegionNode* lease_compare_rgn = new RegionNode(PATH_LIMIT);
  record_for_igvn(lease_compare_rgn);
  PhiNode* lease_compare_mem = new PhiNode(lease_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
  record_for_igvn(lease_compare_mem);
  PhiNode* lease_compare_io = new PhiNode(lease_compare_rgn, Type::ABIO);
  record_for_igvn(lease_compare_io);
  PhiNode* lease_result_value = new PhiNode(lease_compare_rgn, TypeLong::LONG);
  record_for_igvn(lease_result_value);

  // Update control and phi nodes.
  lease_compare_rgn->init_req(_true_path, call_return_lease_control);
  lease_compare_rgn->init_req(_false_path, not_lease);

  lease_compare_mem->init_req(_true_path, _gvn.transform(reset_memory()));
  lease_compare_mem->init_req(_false_path, commit_memory);

  lease_compare_io->init_req(_true_path, i_o());
  lease_compare_io->init_req(_false_path, input_io_state);

  lease_result_value->init_req(_true_path, _gvn.longcon(0)); // if the lease was returned, return 0L.
  lease_result_value->init_req(_false_path, arg); // if not lease, return new updated position.

  RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
  PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
  PhiNode* result_io = new PhiNode(result_rgn, Type::ABIO);
  PhiNode* result_value = new PhiNode(result_rgn, TypeLong::LONG);

  // Update control and phi nodes.
  result_rgn->init_req(_true_path, is_notified);
  result_rgn->init_req(_false_path, _gvn.transform(lease_compare_rgn));

  result_mem->init_req(_true_path, notified_reset_memory);
  result_mem->init_req(_false_path, _gvn.transform(lease_compare_mem));

  result_io->init_req(_true_path, input_io_state);
  result_io->init_req(_false_path, _gvn.transform(lease_compare_io));

  result_value->init_req(_true_path, current_pos);
  result_value->init_req(_false_path, _gvn.transform(lease_result_value));

  // Set output state.
  set_control(_gvn.transform(result_rgn));
  set_all_memory(_gvn.transform(result_mem));
  set_i_o(_gvn.transform(result_io));
  set_result(result_rgn, result_value);
  return true;
}

/*
 * The intrinsic is a model of this pseudo-code:
 *
 * JfrThreadLocal* const tl = Thread::jfr_thread_local()
 * jobject h_event_writer = tl->java_event_writer();
 * if (h_event_writer == nullptr) {
 *   return nullptr;
 * }
 * oop threadObj = Thread::threadObj();
 * oop vthread = java_lang_Thread::vthread(threadObj);
 * traceid tid;
 * bool pinVirtualThread;
 * bool excluded;
 * if (vthread != threadObj) {  // i.e. current thread is virtual
 *   tid = java_lang_Thread::tid(vthread);
 *   u2 vthread_epoch_raw = java_lang_Thread::jfr_epoch(vthread);
 *   pinVirtualThread = VMContinuations;
 *   excluded = vthread_epoch_raw & excluded_mask;
 *   if (!excluded) {
 *     traceid current_epoch = JfrTraceIdEpoch::current_generation();
 *     u2 vthread_epoch = vthread_epoch_raw & epoch_mask;
 *     if (vthread_epoch != current_epoch) {
 *       write_checkpoint();
 *     }
 *   }
 * } else {
 *   tid = java_lang_Thread::tid(threadObj);
 *   u2 thread_epoch_raw = java_lang_Thread::jfr_epoch(threadObj);
 *   pinVirtualThread = false;
 *   excluded = thread_epoch_raw & excluded_mask;
 * }
 * oop event_writer = JNIHandles::resolve_non_null(h_event_writer);
 * traceid tid_in_event_writer = getField(event_writer, "threadID");
 * if (tid_in_event_writer != tid) {
 *   setField(event_writer, "pinVirtualThread", pinVirtualThread);
 *   setField(event_writer, "excluded", excluded);
 *   setField(event_writer, "threadID", tid);
 * }
 * return event_writer
 */
bool LibraryCallKit::inline_native_getEventWriter() {
  enum { _true_path = 1, _false_path = 2, PATH_LIMIT };

  // Save input memory and i_o state.
  Node* input_memory_state = reset_memory();
  set_all_memory(input_memory_state);
  Node* input_io_state = i_o();

  // The most significant bit of the u2 is used to denote thread exclusion
  Node* excluded_shift = _gvn.intcon(15);
  Node* excluded_mask = _gvn.intcon(1 << 15);
  // The epoch generation is the range [1-32767]
  Node* epoch_mask = _gvn.intcon(32767);

  // TLS
  Node* tls_ptr = _gvn.transform(new ThreadLocalNode());

  // Load the address of java event writer jobject handle from the jfr_thread_local structure.
  Node* jobj_ptr = basic_plus_adr(top(), tls_ptr, in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR));

  // Load the eventwriter jobject handle.
  Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);

  // Null check the jobject handle.
  Node* jobj_cmp_null = _gvn.transform(new CmpPNode(jobj, null()));
  Node* test_jobj_not_equal_null = _gvn.transform(new BoolNode(jobj_cmp_null, BoolTest::ne));
  IfNode* iff_jobj_not_equal_null = create_and_map_if(control(), test_jobj_not_equal_null, PROB_MAX, COUNT_UNKNOWN);

  // False path, jobj is null.
  Node* jobj_is_null = _gvn.transform(new IfFalseNode(iff_jobj_not_equal_null));

  // True path, jobj is not null.
  Node* jobj_is_not_null = _gvn.transform(new IfTrueNode(iff_jobj_not_equal_null));

  set_control(jobj_is_not_null);

  // Load the threadObj for the CarrierThread.
  Node* threadObj = generate_current_thread(tls_ptr);

  // Load the vthread.
  Node* vthread = generate_virtual_thread(tls_ptr);

  // If vthread != threadObj, this is a virtual thread.
  Node* vthread_cmp_threadObj = _gvn.transform(new CmpPNode(vthread, threadObj));
  Node* test_vthread_not_equal_threadObj = _gvn.transform(new BoolNode(vthread_cmp_threadObj, BoolTest::ne));
  IfNode* iff_vthread_not_equal_threadObj =
    create_and_map_if(jobj_is_not_null, test_vthread_not_equal_threadObj, PROB_FAIR, COUNT_UNKNOWN);

  // False branch, fallback to threadObj.
  Node* vthread_equal_threadObj = _gvn.transform(new IfFalseNode(iff_vthread_not_equal_threadObj));
  set_control(vthread_equal_threadObj);

  // Load the tid field from the threadObj (the carrier/platform thread).
  Node* thread_obj_tid = load_field_from_object(threadObj, "tid", "J");

  // Load the raw epoch value from the threadObj.
  Node* threadObj_epoch_offset = basic_plus_adr(threadObj, java_lang_Thread::jfr_epoch_offset());
  Node* threadObj_epoch_raw = access_load_at(threadObj, threadObj_epoch_offset,
                                             _gvn.type(threadObj_epoch_offset)->isa_ptr(),
                                             TypeInt::CHAR, T_CHAR,
                                             IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD);

  // Mask off the excluded information from the epoch.
  Node* threadObj_is_excluded = _gvn.transform(new AndINode(threadObj_epoch_raw, excluded_mask));

  // True branch, this is a virtual thread.
  Node* vthread_not_equal_threadObj = _gvn.transform(new IfTrueNode(iff_vthread_not_equal_threadObj));
  set_control(vthread_not_equal_threadObj);

  // Load the tid field from the vthread object.
  Node* vthread_tid = load_field_from_object(vthread, "tid", "J");

  // Continuation support determines if a virtual thread should be pinned.
  Node* global_addr = makecon(TypeRawPtr::make((address)&VMContinuations));
  Node* continuation_support = make_load(control(), global_addr, TypeInt::BOOL, T_BOOLEAN, MemNode::unordered);

  // Load the raw epoch value from the vthread.
  Node* vthread_epoch_offset = basic_plus_adr(vthread, java_lang_Thread::jfr_epoch_offset());
  Node* vthread_epoch_raw = access_load_at(vthread, vthread_epoch_offset, _gvn.type(vthread_epoch_offset)->is_ptr(),
                                           TypeInt::CHAR, T_CHAR,
                                           IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD);

  // Mask off the excluded information from the epoch.
  Node* vthread_is_excluded = _gvn.transform(new AndINode(vthread_epoch_raw, _gvn.transform(excluded_mask)));

  // Branch on excluded to conditionalize updating the epoch for the virtual thread.
  Node* is_excluded_cmp = _gvn.transform(new CmpINode(vthread_is_excluded, _gvn.transform(excluded_mask)));
  Node* test_not_excluded = _gvn.transform(new BoolNode(is_excluded_cmp, BoolTest::ne));
  IfNode* iff_not_excluded = create_and_map_if(control(), test_not_excluded, PROB_MAX, COUNT_UNKNOWN);

  // False branch, vthread is excluded, no need to write epoch info.
  Node* excluded = _gvn.transform(new IfFalseNode(iff_not_excluded));

  // True branch, vthread is included, update epoch info.
  Node* included = _gvn.transform(new IfTrueNode(iff_not_excluded));
  set_control(included);

  // Get epoch value.
  Node* epoch = _gvn.transform(new AndINode(vthread_epoch_raw, _gvn.transform(epoch_mask)));

  // Load the current epoch generation. The value is unsigned 16-bit, so we type it as T_CHAR.
  Node* epoch_generation_address = makecon(TypeRawPtr::make(JfrIntrinsicSupport::epoch_generation_address()));
  Node* current_epoch_generation = make_load(control(), epoch_generation_address, TypeInt::CHAR, T_CHAR, MemNode::unordered);

  // Compare the epoch in the vthread to the current epoch generation.
  Node* const epoch_cmp = _gvn.transform(new CmpUNode(current_epoch_generation, epoch));
  Node* test_epoch_not_equal = _gvn.transform(new BoolNode(epoch_cmp, BoolTest::ne));
  IfNode* iff_epoch_not_equal = create_and_map_if(control(), test_epoch_not_equal, PROB_FAIR, COUNT_UNKNOWN);

  // False path, epoch is equal, checkpoint information is valid.
  Node* epoch_is_equal = _gvn.transform(new IfFalseNode(iff_epoch_not_equal));

  // True path, epoch is not equal, write a checkpoint for the vthread.
  Node* epoch_is_not_equal = _gvn.transform(new IfTrueNode(iff_epoch_not_equal));

  set_control(epoch_is_not_equal);

  // Make a runtime call, which can safepoint, to write a checkpoint for the vthread for this epoch.
  // The call also updates the native thread local thread id and the vthread with the current epoch.
  Node* call_write_checkpoint = make_runtime_call(RC_NO_LEAF,
                                                  OptoRuntime::jfr_write_checkpoint_Type(),
                                                  SharedRuntime::jfr_write_checkpoint(),
                                                  "write_checkpoint", TypePtr::BOTTOM);
  Node* call_write_checkpoint_control = _gvn.transform(new ProjNode(call_write_checkpoint, TypeFunc::Control));

  // vthread epoch != current epoch
  RegionNode* epoch_compare_rgn = new RegionNode(PATH_LIMIT);
  record_for_igvn(epoch_compare_rgn);
  PhiNode* epoch_compare_mem = new PhiNode(epoch_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
  record_for_igvn(epoch_compare_mem);
  PhiNode* epoch_compare_io = new PhiNode(epoch_compare_rgn, Type::ABIO);
  record_for_igvn(epoch_compare_io);

  // Update control and phi nodes.
  epoch_compare_rgn->init_req(_true_path, call_write_checkpoint_control);
  epoch_compare_rgn->init_req(_false_path, epoch_is_equal);
  epoch_compare_mem->init_req(_true_path, _gvn.transform(reset_memory()));
  epoch_compare_mem->init_req(_false_path, input_memory_state);
  epoch_compare_io->init_req(_true_path, i_o());
  epoch_compare_io->init_req(_false_path, input_io_state);

  // excluded != true
  RegionNode* exclude_compare_rgn = new RegionNode(PATH_LIMIT);
  record_for_igvn(exclude_compare_rgn);
  PhiNode* exclude_compare_mem = new PhiNode(exclude_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
  record_for_igvn(exclude_compare_mem);
  PhiNode* exclude_compare_io = new PhiNode(exclude_compare_rgn, Type::ABIO);
  record_for_igvn(exclude_compare_io);

  // Update control and phi nodes.
  exclude_compare_rgn->init_req(_true_path, _gvn.transform(epoch_compare_rgn));
  exclude_compare_rgn->init_req(_false_path, excluded);
  exclude_compare_mem->init_req(_true_path, _gvn.transform(epoch_compare_mem));
  exclude_compare_mem->init_req(_false_path, input_memory_state);
  exclude_compare_io->init_req(_true_path, _gvn.transform(epoch_compare_io));
  exclude_compare_io->init_req(_false_path, input_io_state);

  // vthread != threadObj
  RegionNode* vthread_compare_rgn = new RegionNode(PATH_LIMIT);
  record_for_igvn(vthread_compare_rgn);
  PhiNode* vthread_compare_mem = new PhiNode(vthread_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
  PhiNode* vthread_compare_io = new PhiNode(vthread_compare_rgn, Type::ABIO);
  record_for_igvn(vthread_compare_io);
  PhiNode* tid = new PhiNode(vthread_compare_rgn, TypeLong::LONG);
  record_for_igvn(tid);
  PhiNode* exclusion = new PhiNode(vthread_compare_rgn, TypeInt::CHAR);
  record_for_igvn(exclusion);
  PhiNode* pinVirtualThread = new PhiNode(vthread_compare_rgn, TypeInt::BOOL);
  record_for_igvn(pinVirtualThread);

  // Update control and phi nodes.
  vthread_compare_rgn->init_req(_true_path, _gvn.transform(exclude_compare_rgn));
  vthread_compare_rgn->init_req(_false_path, vthread_equal_threadObj);
  vthread_compare_mem->init_req(_true_path, _gvn.transform(exclude_compare_mem));
  vthread_compare_mem->init_req(_false_path, input_memory_state);
  vthread_compare_io->init_req(_true_path, _gvn.transform(exclude_compare_io));
  vthread_compare_io->init_req(_false_path, input_io_state);
  tid->init_req(_true_path, _gvn.transform(vthread_tid));
  tid->init_req(_false_path, _gvn.transform(thread_obj_tid));
  exclusion->init_req(_true_path, _gvn.transform(vthread_is_excluded));
  exclusion->init_req(_false_path, _gvn.transform(threadObj_is_excluded));
  pinVirtualThread->init_req(_true_path, _gvn.transform(continuation_support));
  pinVirtualThread->init_req(_false_path, _gvn.intcon(0));

  // Update branch state.
  set_control(_gvn.transform(vthread_compare_rgn));
  set_all_memory(_gvn.transform(vthread_compare_mem));
  set_i_o(_gvn.transform(vthread_compare_io));

  // Load the event writer oop by dereferencing the jobject handle.
  ciKlass* klass_EventWriter = env()->find_system_klass(ciSymbol::make("jdk/jfr/internal/event/EventWriter"));
  assert(klass_EventWriter->is_loaded(), "invariant");
  ciInstanceKlass* const instklass_EventWriter = klass_EventWriter->as_instance_klass();
  const TypeKlassPtr* const aklass = TypeKlassPtr::make(instklass_EventWriter);
  const TypeOopPtr* const xtype = aklass->as_instance_type();
  Node* jobj_untagged = _gvn.transform(new AddPNode(top(), jobj, _gvn.MakeConX(-JNIHandles::TypeTag::global)));
  Node* event_writer = access_load(jobj_untagged, xtype, T_OBJECT, IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);

  // Load the current thread id from the event writer object.
  Node* const event_writer_tid = load_field_from_object(event_writer, "threadID", "J");
  // Get the field offset to, conditionally, store an updated tid value later.
  Node* const event_writer_tid_field = field_address_from_object(event_writer, "threadID", "J", false);
  // Get the field offset to, conditionally, store an updated exclusion value later.
  Node* const event_writer_excluded_field = field_address_from_object(event_writer, "excluded", "Z", false);
  // Get the field offset to, conditionally, store an updated pinVirtualThread value later.
  Node* const event_writer_pin_field = field_address_from_object(event_writer, "pinVirtualThread", "Z", false);

  RegionNode* event_writer_tid_compare_rgn = new RegionNode(PATH_LIMIT);
  record_for_igvn(event_writer_tid_compare_rgn);
  PhiNode* event_writer_tid_compare_mem = new PhiNode(event_writer_tid_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
  record_for_igvn(event_writer_tid_compare_mem);
  PhiNode* event_writer_tid_compare_io = new PhiNode(event_writer_tid_compare_rgn, Type::ABIO);
  record_for_igvn(event_writer_tid_compare_io);

  // Compare the current tid from the thread object to what is currently stored in the event writer object.
  Node* const tid_cmp = _gvn.transform(new CmpLNode(event_writer_tid, _gvn.transform(tid)));
  Node* test_tid_not_equal = _gvn.transform(new BoolNode(tid_cmp, BoolTest::ne));
  IfNode* iff_tid_not_equal = create_and_map_if(_gvn.transform(vthread_compare_rgn), test_tid_not_equal, PROB_FAIR, COUNT_UNKNOWN);

  // False path, tids are the same.
  Node* tid_is_equal = _gvn.transform(new IfFalseNode(iff_tid_not_equal));

  // True path, tid is not equal, need to update the tid in the event writer.
  Node* tid_is_not_equal = _gvn.transform(new IfTrueNode(iff_tid_not_equal));
  record_for_igvn(tid_is_not_equal);

  // Store the pin state to the event writer.
  store_to_memory(tid_is_not_equal, event_writer_pin_field, _gvn.transform(pinVirtualThread), T_BOOLEAN, MemNode::unordered);

  // Store the exclusion state to the event writer.
  Node* excluded_bool = _gvn.transform(new URShiftINode(_gvn.transform(exclusion), excluded_shift));
  store_to_memory(tid_is_not_equal, event_writer_excluded_field, excluded_bool, T_BOOLEAN, MemNode::unordered);

  // Store the tid to the event writer.
  store_to_memory(tid_is_not_equal, event_writer_tid_field, tid, T_LONG, MemNode::unordered);

  // Update control and phi nodes.
  event_writer_tid_compare_rgn->init_req(_true_path, tid_is_not_equal);
  event_writer_tid_compare_rgn->init_req(_false_path, tid_is_equal);
  event_writer_tid_compare_mem->init_req(_true_path, _gvn.transform(reset_memory()));
  event_writer_tid_compare_mem->init_req(_false_path, _gvn.transform(vthread_compare_mem));
  event_writer_tid_compare_io->init_req(_true_path, _gvn.transform(i_o()));
  event_writer_tid_compare_io->init_req(_false_path, _gvn.transform(vthread_compare_io));

  // Result of top level CFG, Memory, IO and Value.
  RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
  PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
  PhiNode* result_io = new PhiNode(result_rgn, Type::ABIO);
  PhiNode* result_value = new PhiNode(result_rgn, TypeInstPtr::BOTTOM);

  // Result control.
  result_rgn->init_req(_true_path, _gvn.transform(event_writer_tid_compare_rgn));
  result_rgn->init_req(_false_path, jobj_is_null);

  // Result memory.
  result_mem->init_req(_true_path, _gvn.transform(event_writer_tid_compare_mem));
  result_mem->init_req(_false_path, _gvn.transform(input_memory_state));

  // Result IO.
  result_io->init_req(_true_path, _gvn.transform(event_writer_tid_compare_io));
  result_io->init_req(_false_path, _gvn.transform(input_io_state));

  // Result value.
  result_value->init_req(_true_path, _gvn.transform(event_writer)); // return event writer oop
  result_value->init_req(_false_path, null()); // return null

  // Set output state.
  set_control(_gvn.transform(result_rgn));
  set_all_memory(_gvn.transform(result_mem));
  set_i_o(_gvn.transform(result_io));
  set_result(result_rgn, result_value);
  return true;
}

/*
 * The intrinsic is a model of this pseudo-code:
 *
 * JfrThreadLocal* const tl = thread->jfr_thread_local();
 * if (carrierThread != thread) { // is virtual thread
 *   const u2 vthread_epoch_raw = java_lang_Thread::jfr_epoch(thread);
 *   bool excluded = vthread_epoch_raw & excluded_mask;
 *   AtomicAccess::store(&tl->_contextual_tid, java_lang_Thread::tid(thread));
 *   AtomicAccess::store(&tl->_contextual_thread_excluded, is_excluded);
 *   if (!excluded) {
 *     const u2 vthread_epoch = vthread_epoch_raw & epoch_mask;
 *     AtomicAccess::store(&tl->_vthread_epoch, vthread_epoch);
 *   }
 *   AtomicAccess::release_store(&tl->_vthread, true);
 *   return;
 * }
 * AtomicAccess::release_store(&tl->_vthread, false);
 */
void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) {
  enum { _true_path = 1, _false_path = 2, PATH_LIMIT };

  Node* input_memory_state = reset_memory();
  set_all_memory(input_memory_state);

  // The most significant bit of the u2 is used to denote thread exclusion
  Node* excluded_mask = _gvn.intcon(1 << 15);
  // The epoch generation is the range [1-32767]
  Node* epoch_mask = _gvn.intcon(32767);

  Node* const carrierThread = generate_current_thread(jt);
  // If thread != carrierThread, this is a virtual thread.
  Node* thread_cmp_carrierThread = _gvn.transform(new CmpPNode(thread, carrierThread));
  Node* test_thread_not_equal_carrierThread = _gvn.transform(new BoolNode(thread_cmp_carrierThread, BoolTest::ne));
  IfNode* iff_thread_not_equal_carrierThread =
    create_and_map_if(control(), test_thread_not_equal_carrierThread, PROB_FAIR, COUNT_UNKNOWN);

  Node* vthread_offset = basic_plus_adr(top(), jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_OFFSET_JFR));

  // False branch, is carrierThread.
  Node* thread_equal_carrierThread = _gvn.transform(new IfFalseNode(iff_thread_not_equal_carrierThread));
  // Store release
  Node* vthread_false_memory = store_to_memory(thread_equal_carrierThread, vthread_offset, _gvn.intcon(0), T_BOOLEAN, MemNode::release, true);

  set_all_memory(input_memory_state);

  // True branch, is virtual thread.
  Node* thread_not_equal_carrierThread = _gvn.transform(new IfTrueNode(iff_thread_not_equal_carrierThread));
  set_control(thread_not_equal_carrierThread);

  // Load the raw epoch value from the vthread.
  Node* epoch_offset = basic_plus_adr(thread, java_lang_Thread::jfr_epoch_offset());
  Node* epoch_raw = access_load_at(thread, epoch_offset, _gvn.type(epoch_offset)->is_ptr(), TypeInt::CHAR, T_CHAR,
                                   IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD);

  // Mask off the excluded information from the epoch.
  Node* const is_excluded = _gvn.transform(new AndINode(epoch_raw, _gvn.transform(excluded_mask)));

  // Load the tid field from the thread.
  Node* tid = load_field_from_object(thread, "tid", "J");

  // Store the vthread tid to the jfr thread local.
  Node* thread_id_offset = basic_plus_adr(top(), jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_ID_OFFSET_JFR));
  Node* tid_memory = store_to_memory(control(), thread_id_offset, tid, T_LONG, MemNode::unordered, true);

  // Branch on is_excluded to conditionalize updating the epoch.
3699 Node* excluded_cmp = _gvn.transform(new CmpINode(is_excluded, _gvn.transform(excluded_mask)));
3700 Node* test_excluded = _gvn.transform(new BoolNode(excluded_cmp, BoolTest::eq));
3701 IfNode* iff_excluded = create_and_map_if(control(), test_excluded, PROB_MIN, COUNT_UNKNOWN);
3702
3703 // True branch, vthread is excluded, no need to write epoch info.
3704 Node* excluded = _gvn.transform(new IfTrueNode(iff_excluded));
3705 set_control(excluded);
3706 Node* vthread_is_excluded = _gvn.intcon(1);
3707
3708 // False branch, vthread is included, update epoch info.
3709 Node* included = _gvn.transform(new IfFalseNode(iff_excluded));
3710 set_control(included);
3711 Node* vthread_is_included = _gvn.intcon(0);
3712
3713 // Get epoch value.
3714 Node* epoch = _gvn.transform(new AndINode(epoch_raw, _gvn.transform(epoch_mask)));
3715
3716 // Store the vthread epoch to the jfr thread local.
3717 Node* vthread_epoch_offset = basic_plus_adr(top(), jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EPOCH_OFFSET_JFR));
3718 Node* included_memory = store_to_memory(control(), vthread_epoch_offset, epoch, T_CHAR, MemNode::unordered, true);
3719
3720 RegionNode* excluded_rgn = new RegionNode(PATH_LIMIT);
3721 record_for_igvn(excluded_rgn);
3722 PhiNode* excluded_mem = new PhiNode(excluded_rgn, Type::MEMORY, TypePtr::BOTTOM);
3723 record_for_igvn(excluded_mem);
3724 PhiNode* exclusion = new PhiNode(excluded_rgn, TypeInt::BOOL);
3725 record_for_igvn(exclusion);
3726
3727 // Merge the excluded control and memory.
3728 excluded_rgn->init_req(_true_path, excluded);
3729 excluded_rgn->init_req(_false_path, included);
3730 excluded_mem->init_req(_true_path, tid_memory);
3731 excluded_mem->init_req(_false_path, included_memory);
3732 exclusion->init_req(_true_path, _gvn.transform(vthread_is_excluded));
3733 exclusion->init_req(_false_path, _gvn.transform(vthread_is_included));
3734
3735 // Set intermediate state.
3736 set_control(_gvn.transform(excluded_rgn));
3737 set_all_memory(excluded_mem);
3738
3739 // Store the vthread exclusion state to the jfr thread local.
3740 Node* thread_local_excluded_offset = basic_plus_adr(top(), jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EXCLUDED_OFFSET_JFR));
3741 store_to_memory(control(), thread_local_excluded_offset, _gvn.transform(exclusion), T_BOOLEAN, MemNode::unordered, true);
3742
3743   // Store release: publish the tid/epoch/exclusion stores above before the vthread flag becomes visible.
3744   Node* vthread_true_memory = store_to_memory(control(), vthread_offset, _gvn.intcon(1), T_BOOLEAN, MemNode::release, true);
3745
3746 RegionNode* thread_compare_rgn = new RegionNode(PATH_LIMIT);
3747 record_for_igvn(thread_compare_rgn);
3748 PhiNode* thread_compare_mem = new PhiNode(thread_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
3749 record_for_igvn(thread_compare_mem);
3750 PhiNode* vthread = new PhiNode(thread_compare_rgn, TypeInt::BOOL);
3751 record_for_igvn(vthread);
3752
3753 // Merge the thread_compare control and memory.
3754 thread_compare_rgn->init_req(_true_path, control());
3755 thread_compare_rgn->init_req(_false_path, thread_equal_carrierThread);
3756 thread_compare_mem->init_req(_true_path, vthread_true_memory);
3757 thread_compare_mem->init_req(_false_path, vthread_false_memory);
3758
3759 // Set output state.
3760 set_control(_gvn.transform(thread_compare_rgn));
3761 set_all_memory(_gvn.transform(thread_compare_mem));
3762 }
3763
3764 #endif // JFR_HAVE_INTRINSICS
3765
3766 //------------------------inline_native_currentCarrierThread------------------
3767 bool LibraryCallKit::inline_native_currentCarrierThread() {
3768 Node* junk = nullptr;
3769 set_result(generate_current_thread(junk));
3770 return true;
3771 }
3772
3773 //------------------------inline_native_currentThread------------------
3774 bool LibraryCallKit::inline_native_currentThread() {
3775 Node* junk = nullptr;
3776 set_result(generate_virtual_thread(junk));
3777 return true;
3778 }
3779
3780 //------------------------inline_native_setCurrentThread------------------
3781 bool LibraryCallKit::inline_native_setCurrentThread() {
3782 assert(C->method()->changes_current_thread(),
3783 "method changes current Thread but is not annotated ChangesCurrentThread");
3784 Node* arr = argument(1);
3785 Node* thread = _gvn.transform(new ThreadLocalNode());
3786 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
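  // JavaThread::_vthread is an OopHandle: the field holds a pointer to an oop
  // slot, so load the handle here and store the new thread oop through it
  // with the IN_NATIVE access below.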
3787 Node* thread_obj_handle
3788 = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3789 thread_obj_handle = _gvn.transform(thread_obj_handle);
3790   const TypePtr* adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3791 access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3792
3793 // Change the _monitor_owner_id of the JavaThread
3794 Node* tid = load_field_from_object(arr, "tid", "J");
3795 Node* monitor_owner_id_offset = basic_plus_adr(top(), thread, in_bytes(JavaThread::monitor_owner_id_offset()));
3796 store_to_memory(control(), monitor_owner_id_offset, tid, T_LONG, MemNode::unordered, true);
3797
3798 JFR_ONLY(extend_setCurrentThread(thread, arr);)
3799 return true;
3800 }
3801
3802 const Type* LibraryCallKit::scopedValueCache_type() {
3803 ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3804 const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3805 const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3806
3807   // Because we create the scopedValue cache lazily, we have to make the
3808 // type of the result BotPTR.
3809 bool xk = etype->klass_is_exact();
3810 const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3811 return objects_type;
3812 }
3813
3814 Node* LibraryCallKit::scopedValueCache_helper() {
3815 Node* thread = _gvn.transform(new ThreadLocalNode());
3816 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3817 // We cannot use immutable_memory() because we might flip onto a
3818 // different carrier thread, at which point we'll need to use that
3819 // carrier thread's cache.
3820 // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3821 // TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3822 return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3823 }
3824
3825 //------------------------inline_native_scopedValueCache------------------
3826 bool LibraryCallKit::inline_native_scopedValueCache() {
3827 Node* cache_obj_handle = scopedValueCache_helper();
3828 const Type* objects_type = scopedValueCache_type();
3829 set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3830
3831 return true;
3832 }
3833
3834 //------------------------inline_native_setScopedValueCache------------------
3835 bool LibraryCallKit::inline_native_setScopedValueCache() {
3836 Node* arr = argument(0);
3837 Node* cache_obj_handle = scopedValueCache_helper();
3838 const Type* objects_type = scopedValueCache_type();
3839
3840   const TypePtr* adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3841 access_store_at(nullptr, cache_obj_handle, adr_type, arr, objects_type, T_OBJECT, IN_NATIVE | MO_UNORDERED);
3842
3843 return true;
3844 }
3845
3846 //------------------------inline_native_Continuation_pin and unpin-----------
3847
3848 // Shared implementation routine for both pin and unpin.
3849 bool LibraryCallKit::inline_native_Continuation_pinning(bool unpin) {
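  // Fast path: locate the current ContinuationEntry through the TLS
  // cont_entry field, range-check the pin count and update it in place.
  // Over/underflow traps to the interpreter, which throws
  // IllegalStateException.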
3850 enum { _true_path = 1, _false_path = 2, PATH_LIMIT };
3851
3852 // Save input memory.
3853 Node* input_memory_state = reset_memory();
3854 set_all_memory(input_memory_state);
3855
3856 // TLS
3857 Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
3858 Node* last_continuation_offset = basic_plus_adr(top(), tls_ptr, in_bytes(JavaThread::cont_entry_offset()));
3859 Node* last_continuation = make_load(control(), last_continuation_offset, last_continuation_offset->get_ptr_type(), T_ADDRESS, MemNode::unordered);
3860
3861 // Null check the last continuation object.
3862 Node* continuation_cmp_null = _gvn.transform(new CmpPNode(last_continuation, null()));
3863 Node* test_continuation_not_equal_null = _gvn.transform(new BoolNode(continuation_cmp_null, BoolTest::ne));
3864 IfNode* iff_continuation_not_equal_null = create_and_map_if(control(), test_continuation_not_equal_null, PROB_MAX, COUNT_UNKNOWN);
3865
3866 // False path, last continuation is null.
3867 Node* continuation_is_null = _gvn.transform(new IfFalseNode(iff_continuation_not_equal_null));
3868
3869 // True path, last continuation is not null.
3870 Node* continuation_is_not_null = _gvn.transform(new IfTrueNode(iff_continuation_not_equal_null));
3871
3872 set_control(continuation_is_not_null);
3873
3874 // Load the pin count from the last continuation.
3875 Node* pin_count_offset = basic_plus_adr(top(), last_continuation, in_bytes(ContinuationEntry::pin_count_offset()));
3876 Node* pin_count = make_load(control(), pin_count_offset, TypeInt::INT, T_INT, MemNode::unordered);
3877
3878   // The loaded pin count is compared against a context-specific rhs for over/underflow detection.
3879 Node* pin_count_rhs;
3880 if (unpin) {
3881 pin_count_rhs = _gvn.intcon(0);
3882 } else {
3883 pin_count_rhs = _gvn.intcon(UINT32_MAX);
3884 }
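  // A single unsigned compare catches both failure modes: when unpinning, a
  // pin count of 0 would underflow on decrement; when pinning, a pin count of
  // UINT32_MAX would overflow on increment.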
3885 Node* pin_count_cmp = _gvn.transform(new CmpUNode(_gvn.transform(pin_count), pin_count_rhs));
3886 Node* test_pin_count_over_underflow = _gvn.transform(new BoolNode(pin_count_cmp, BoolTest::eq));
3887 IfNode* iff_pin_count_over_underflow = create_and_map_if(control(), test_pin_count_over_underflow, PROB_MIN, COUNT_UNKNOWN);
3888
3889 // True branch, pin count over/underflow.
3890 Node* pin_count_over_underflow = _gvn.transform(new IfTrueNode(iff_pin_count_over_underflow));
3891 {
3892     // Trap (but do not deoptimize: Action_none) and continue in the interpreter,
3893     // which will throw IllegalStateException on pin count over/underflow.
3894     // No memory has changed so far - we can use the memory created by reset_memory()
3895     // at the beginning of this intrinsic. No need to call reset_memory() again.
3896 PreserveJVMState pjvms(this);
3897 set_control(pin_count_over_underflow);
3898 uncommon_trap(Deoptimization::Reason_intrinsic,
3899 Deoptimization::Action_none);
3900 assert(stopped(), "invariant");
3901 }
3902
3903 // False branch, no pin count over/underflow. Increment or decrement pin count and store back.
3904 Node* valid_pin_count = _gvn.transform(new IfFalseNode(iff_pin_count_over_underflow));
3905 set_control(valid_pin_count);
3906
3907 Node* next_pin_count;
3908 if (unpin) {
3909 next_pin_count = _gvn.transform(new SubINode(pin_count, _gvn.intcon(1)));
3910 } else {
3911 next_pin_count = _gvn.transform(new AddINode(pin_count, _gvn.intcon(1)));
3912 }
3913
3914 store_to_memory(control(), pin_count_offset, next_pin_count, T_INT, MemNode::unordered);
3915
3916 // Result of top level CFG and Memory.
3917 RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
3918 record_for_igvn(result_rgn);
3919 PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
3920 record_for_igvn(result_mem);
3921
3922 result_rgn->init_req(_true_path, _gvn.transform(valid_pin_count));
3923 result_rgn->init_req(_false_path, _gvn.transform(continuation_is_null));
3924 result_mem->init_req(_true_path, _gvn.transform(reset_memory()));
3925 result_mem->init_req(_false_path, _gvn.transform(input_memory_state));
3926
3927 // Set output state.
3928 set_control(_gvn.transform(result_rgn));
3929 set_all_memory(_gvn.transform(result_mem));
3930
3931 return true;
3932 }
3933
3934 //---------------------------load_mirror_from_klass----------------------------
3935 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3936 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3937 Node* p = basic_plus_adr(top(), klass, in_bytes(Klass::java_mirror_offset()));
3938 Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3939 // mirror = ((OopHandle)mirror)->resolve();
3940 return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3941 }
3942
3943 //-----------------------load_klass_from_mirror_common-------------------------
3944 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3945 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3946 // and branch to the given path on the region.
3947 // If never_see_null, take an uncommon trap on null, so we can optimistically
3948 // compile for the non-null case.
3949 // If the region is null, force never_see_null = true.
3950 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3951 bool never_see_null,
3952 RegionNode* region,
3953 int null_path,
3954 int offset) {
3955 if (region == nullptr) never_see_null = true;
3956 Node* p = basic_plus_adr(mirror, offset);
3957 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3958 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3959 Node* null_ctl = top();
3960 kls = null_check_oop(kls, &null_ctl, never_see_null);
3961 if (region != nullptr) {
3962     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
3963 region->init_req(null_path, null_ctl);
3964 } else {
3965 assert(null_ctl == top(), "no loose ends");
3966 }
3967 return kls;
3968 }
3969
3970 //--------------------(inline_native_Class_query helpers)---------------------
3971 // Use this for JVM_ACC_INTERFACE.
3972 // Fall through if (mods & mask) == bits, take the guard otherwise.
3973 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
3974 ByteSize offset, const Type* type, BasicType bt) {
3975 // Branch around if the given klass has the given modifier bit set.
3976 // Like generate_guard, adds a new path onto the region.
3977 Node* modp = basic_plus_adr(top(), kls, in_bytes(offset));
3978 Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
3979 Node* mask = intcon(modifier_mask);
3980 Node* bits = intcon(modifier_bits);
3981 Node* mbit = _gvn.transform(new AndINode(mods, mask));
3982 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
3983 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3984 return generate_fair_guard(bol, region);
3985 }
3986 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3987 return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
3988 InstanceKlass::access_flags_offset(), TypeInt::CHAR, T_CHAR);
3989 }
3990
3991 // Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
3992 Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3993 return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
3994 Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
3995 }
3996
3997 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3998 return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
3999 }
4000
4001 //-------------------------inline_native_Class_query-------------------
4002 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
4003 const Type* return_type = TypeInt::BOOL;
4004 Node* prim_return_value = top(); // what happens if it's a primitive class?
4005 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4006 bool expect_prim = false; // most of these guys expect to work on refs
4007
4008 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
4009
4010 Node* mirror = argument(0);
4011 Node* obj = top();
4012
4013 switch (id) {
4014 case vmIntrinsics::_isInstance:
4015 // nothing is an instance of a primitive type
4016 prim_return_value = intcon(0);
4017 obj = argument(1);
4018 break;
4019 case vmIntrinsics::_isHidden:
4020 prim_return_value = intcon(0);
4021 break;
4022 case vmIntrinsics::_getSuperclass:
4023 prim_return_value = null();
4024 return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
4025 break;
4026 default:
4027 fatal_unexpected_iid(id);
4028 break;
4029 }
4030
4031 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4032 if (mirror_con == nullptr) return false; // cannot happen?
4033
4034 #ifndef PRODUCT
4035 if (C->print_intrinsics() || C->print_inlining()) {
4036 ciType* k = mirror_con->java_mirror_type();
4037 if (k) {
4038 tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
4039 k->print_name();
4040 tty->cr();
4041 }
4042 }
4043 #endif
4044
4045 // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive).
4046 RegionNode* region = new RegionNode(PATH_LIMIT);
4047 record_for_igvn(region);
4048 PhiNode* phi = new PhiNode(region, return_type);
4049
4050   // The mirror will never be null for Reflection.getClassAccessFlags; however,
4051   // it may be null for Class.isInstance or Class.getModifiers. Throw an NPE
4052   // if it is. See bug 4774291.
4053
4054 // For Reflection.getClassAccessFlags(), the null check occurs in
4055 // the wrong place; see inline_unsafe_access(), above, for a similar
4056 // situation.
4057 mirror = null_check(mirror);
4058 // If mirror or obj is dead, only null-path is taken.
4059 if (stopped()) return true;
4060
4061 if (expect_prim) never_see_null = false; // expect nulls (meaning prims)
4062
4063 // Now load the mirror's klass metaobject, and null-check it.
4064 // Side-effects region with the control path if the klass is null.
4065 Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path);
4066 // If kls is null, we have a primitive mirror.
4067 phi->init_req(_prim_path, prim_return_value);
4068 if (stopped()) { set_result(region, phi); return true; }
4069 bool safe_for_replace = (region->in(_prim_path) == top());
4070
4071 Node* p; // handy temp
4072 Node* null_ctl;
4073
4074 // Now that we have the non-null klass, we can perform the real query.
4075 // For constant classes, the query will constant-fold in LoadNode::Value.
4076 Node* query_value = top();
4077 switch (id) {
4078 case vmIntrinsics::_isInstance:
4079 // nothing is an instance of a primitive type
4080 query_value = gen_instanceof(obj, kls, safe_for_replace);
4081 break;
4082
4083 case vmIntrinsics::_isHidden:
4084 // (To verify this code sequence, check the asserts in JVM_IsHiddenClass.)
4085 if (generate_hidden_class_guard(kls, region) != nullptr)
4086       // A guard was added. If the guard is taken, it was a hidden class.
4087 phi->add_req(intcon(1));
4088 // If we fall through, it's a plain class.
4089 query_value = intcon(0);
4090 break;
4091
4092
4093 case vmIntrinsics::_getSuperclass:
4094 // The rules here are somewhat unfortunate, but we can still do better
4095 // with random logic than with a JNI call.
4096 // Interfaces store null or Object as _super, but must report null.
4097 // Arrays store an intermediate super as _super, but must report Object.
4098 // Other types can report the actual _super.
4099 // (To verify this code sequence, check the asserts in JVM_IsInterface.)
4100 if (generate_array_guard(kls, region) != nullptr) {
4101 // A guard was added. If the guard is taken, it was an array.
4102 phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
4103 }
4104 // Check for interface after array since this checks AccessFlags offset into InstanceKlass.
4105 // In other words, we are accessing subtype-specific information, so we need to determine the subtype first.
4106 if (generate_interface_guard(kls, region) != nullptr) {
4107 // A guard was added. If the guard is taken, it was an interface.
4108 phi->add_req(null());
4109 }
4110 // If we fall through, it's a plain class. Get its _super.
4111 p = basic_plus_adr(top(), kls, in_bytes(Klass::super_offset()));
4112 kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
4113 null_ctl = top();
4114 kls = null_check_oop(kls, &null_ctl);
4115 if (null_ctl != top()) {
4116 // If the guard is taken, Object.superClass is null (both klass and mirror).
4117 region->add_req(null_ctl);
4118 phi ->add_req(null());
4119 }
4120 if (!stopped()) {
4121 query_value = load_mirror_from_klass(kls);
4122 }
4123 break;
4124
4125 default:
4126 fatal_unexpected_iid(id);
4127 break;
4128 }
4129
4130 // Fall-through is the normal case of a query to a real class.
4131 phi->init_req(1, query_value);
4132 region->init_req(1, control());
4133
4134 C->set_has_split_ifs(true); // Has chance for split-if optimization
4135 set_result(region, phi);
4136 return true;
4137 }
4138
4139 //-------------------------inline_Class_cast-------------------
4140 bool LibraryCallKit::inline_Class_cast() {
4141 Node* mirror = argument(0); // Class
4142 Node* obj = argument(1);
4143 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4144 if (mirror_con == nullptr) {
4145 return false; // dead path (mirror->is_top()).
4146 }
4147 if (obj == nullptr || obj->is_top()) {
4148 return false; // dead path
4149 }
4150 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4151
4152 // First, see if Class.cast() can be folded statically.
4153 // java_mirror_type() returns non-null for compile-time Class constants.
4154 ciType* tm = mirror_con->java_mirror_type();
4155 if (tm != nullptr && tm->is_klass() &&
4156 tp != nullptr) {
4157 if (!tp->is_loaded()) {
4158 // Don't use intrinsic when class is not loaded.
4159 return false;
4160 } else {
4161 int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());
4162 if (static_res == Compile::SSC_always_true) {
4163 // isInstance() is true - fold the code.
4164 set_result(obj);
4165 return true;
4166 } else if (static_res == Compile::SSC_always_false) {
4167 // Don't use intrinsic, have to throw ClassCastException.
4168 // If the reference is null, the non-intrinsic bytecode will
4169 // be optimized appropriately.
4170 return false;
4171 }
4172 }
4173 }
4174
4175 // Bailout intrinsic and do normal inlining if exception path is frequent.
4176 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4177 return false;
4178 }
4179
4180 // Generate dynamic checks.
4181 // Class.cast() is java implementation of _checkcast bytecode.
4182 // Do checkcast (Parse::do_checkcast()) optimizations here.
4183
4184 mirror = null_check(mirror);
4185 // If mirror is dead, only null-path is taken.
4186 if (stopped()) {
4187 return true;
4188 }
4189
4190 // Not-subtype or the mirror's klass ptr is null (in case it is a primitive).
4191 enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
4192 RegionNode* region = new RegionNode(PATH_LIMIT);
4193 record_for_igvn(region);
4194
4195 // Now load the mirror's klass metaobject, and null-check it.
4196 // If kls is null, we have a primitive mirror and
4197 // nothing is an instance of a primitive type.
4198 Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4199
4200 Node* res = top();
4201 if (!stopped()) {
4202 Node* bad_type_ctrl = top();
4203 // Do checkcast optimizations.
4204 res = gen_checkcast(obj, kls, &bad_type_ctrl);
4205 region->init_req(_bad_type_path, bad_type_ctrl);
4206 }
4207 if (region->in(_prim_path) != top() ||
4208 region->in(_bad_type_path) != top()) {
4209 // Let Interpreter throw ClassCastException.
4210 PreserveJVMState pjvms(this);
4211 set_control(_gvn.transform(region));
4212 uncommon_trap(Deoptimization::Reason_intrinsic,
4213 Deoptimization::Action_maybe_recompile);
4214 }
4215 if (!stopped()) {
4216 set_result(res);
4217 }
4218 return true;
4219 }
4220
4221
4222 //--------------------------inline_native_subtype_check------------------------
4223 // This intrinsic takes the JNI calls out of the heart of
4224 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4225 bool LibraryCallKit::inline_native_subtype_check() {
4226 // Pull both arguments off the stack.
4227 Node* args[2]; // two java.lang.Class mirrors: superc, subc
4228 args[0] = argument(0);
4229 args[1] = argument(1);
4230 Node* klasses[2]; // corresponding Klasses: superk, subk
4231 klasses[0] = klasses[1] = top();
4232
4233 enum {
4234 // A full decision tree on {superc is prim, subc is prim}:
4235 _prim_0_path = 1, // {P,N} => false
4236 // {P,P} & superc!=subc => false
4237 _prim_same_path, // {P,P} & superc==subc => true
4238 _prim_1_path, // {N,P} => false
4239 _ref_subtype_path, // {N,N} & subtype check wins => true
4240 _both_ref_path, // {N,N} & subtype check loses => false
4241 PATH_LIMIT
4242 };
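  // For example, int.class.isAssignableFrom(int.class) exits through
  // _prim_same_path (true), while int.class.isAssignableFrom(String.class)
  // exits through _prim_0_path (false).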
4243
4244 RegionNode* region = new RegionNode(PATH_LIMIT);
4245 Node* phi = new PhiNode(region, TypeInt::BOOL);
4246 record_for_igvn(region);
4247
4248 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
4249 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4250 int class_klass_offset = java_lang_Class::klass_offset();
4251
4252 // First null-check both mirrors and load each mirror's klass metaobject.
4253 int which_arg;
4254 for (which_arg = 0; which_arg <= 1; which_arg++) {
4255 Node* arg = args[which_arg];
4256 arg = null_check(arg);
4257 if (stopped()) break;
4258 args[which_arg] = arg;
4259
4260 Node* p = basic_plus_adr(arg, class_klass_offset);
4261 Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
4262 klasses[which_arg] = _gvn.transform(kls);
4263 }
4264
4265 // Having loaded both klasses, test each for null.
4266 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4267 for (which_arg = 0; which_arg <= 1; which_arg++) {
4268 Node* kls = klasses[which_arg];
4269 Node* null_ctl = top();
4270 kls = null_check_oop(kls, &null_ctl, never_see_null);
4271 int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
4272 region->init_req(prim_path, null_ctl);
4273 if (stopped()) break;
4274 klasses[which_arg] = kls;
4275 }
4276
4277 if (!stopped()) {
4278 // now we have two reference types, in klasses[0..1]
4279 Node* subk = klasses[1]; // the argument to isAssignableFrom
4280 Node* superk = klasses[0]; // the receiver
4281 region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4282 // now we have a successful reference subtype check
4283 region->set_req(_ref_subtype_path, control());
4284 }
4285
4286 // If both operands are primitive (both klasses null), then
4287 // we must return true when they are identical primitives.
4288 // It is convenient to test this after the first null klass check.
4289 set_control(region->in(_prim_0_path)); // go back to first null check
4290 if (!stopped()) {
4291 // Since superc is primitive, make a guard for the superc==subc case.
4292 Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4293 Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4294 generate_guard(bol_eq, region, PROB_FAIR);
4295 if (region->req() == PATH_LIMIT+1) {
4296 // A guard was added. If the added guard is taken, superc==subc.
4297 region->swap_edges(PATH_LIMIT, _prim_same_path);
4298 region->del_req(PATH_LIMIT);
4299 }
4300 region->set_req(_prim_0_path, control()); // Not equal after all.
4301 }
4302
4303 // these are the only paths that produce 'true':
4304 phi->set_req(_prim_same_path, intcon(1));
4305 phi->set_req(_ref_subtype_path, intcon(1));
4306
4307 // pull together the cases:
4308 assert(region->req() == PATH_LIMIT, "sane region");
4309 for (uint i = 1; i < region->req(); i++) {
4310 Node* ctl = region->in(i);
4311 if (ctl == nullptr || ctl == top()) {
4312 region->set_req(i, top());
4313 phi ->set_req(i, top());
4314 } else if (phi->in(i) == nullptr) {
4315 phi->set_req(i, intcon(0)); // all other paths produce 'false'
4316 }
4317 }
4318
4319 set_control(_gvn.transform(region));
4320 set_result(_gvn.transform(phi));
4321 return true;
4322 }
4323
4324 //---------------------generate_array_guard_common------------------------
4325 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
4326 bool obj_array, bool not_array, Node** obj) {
4327
4328 if (stopped()) {
4329 return nullptr;
4330 }
4331
4332   // If obj_array/not_array==false/false:
4333   // Branch around if the given klass is in fact an array (either obj or prim).
4334   // If obj_array/not_array==false/true:
4335   // Branch around if the given klass is not an array klass of any kind.
4336   // If obj_array/not_array==true/true:
4337   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
4338   // If obj_array/not_array==true/false:
4339   // Branch around if the kls is an oop array (Object[] or subtype).
4340 //
4341 // Like generate_guard, adds a new path onto the region.
4342 jint layout_con = 0;
4343 Node* layout_val = get_layout_helper(kls, layout_con);
4344 if (layout_val == nullptr) {
4345 bool query = (obj_array
4346 ? Klass::layout_helper_is_objArray(layout_con)
4347 : Klass::layout_helper_is_array(layout_con));
4348 if (query == not_array) {
4349 return nullptr; // never a branch
4350 } else { // always a branch
4351 Node* always_branch = control();
4352 if (region != nullptr)
4353 region->add_req(always_branch);
4354 set_control(top());
4355 return always_branch;
4356 }
4357 }
4358 // Now test the correct condition.
4359 jint nval = (obj_array
4360 ? (jint)(Klass::_lh_array_tag_type_value
4361 << Klass::_lh_array_tag_shift)
4362 : Klass::_lh_neutral_value);
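  // Array layout helpers are negative (the array tag occupies the sign bits),
  // and the objArray tag is numerically below the typeArray tag, so a signed
  // 'lt' compare against nval tests is_objArray (against the typeArray tag
  // boundary) or is_array (against zero).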
4363 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4364 BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array
4365 // invert the test if we are looking for a non-array
4366 if (not_array) btest = BoolTest(btest).negate();
4367 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4368 Node* ctrl = generate_fair_guard(bol, region);
4369 Node* is_array_ctrl = not_array ? control() : ctrl;
4370 if (obj != nullptr && is_array_ctrl != nullptr && is_array_ctrl != top()) {
4371 // Keep track of the fact that 'obj' is an array to prevent
4372 // array specific accesses from floating above the guard.
4373 *obj = _gvn.transform(new CastPPNode(is_array_ctrl, *obj, TypeAryPtr::BOTTOM));
4374 }
4375 return ctrl;
4376 }
4377
4378
4379 //-----------------------inline_native_newArray--------------------------
4380 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4381 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4382 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4383 Node* mirror;
4384 Node* count_val;
4385 if (uninitialized) {
4386 null_check_receiver();
4387 mirror = argument(1);
4388 count_val = argument(2);
4389 } else {
4390 mirror = argument(0);
4391 count_val = argument(1);
4392 }
4393
4394 mirror = null_check(mirror);
4395 // If mirror or obj is dead, only null-path is taken.
4396 if (stopped()) return true;
4397
4398 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4399 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4400 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4401 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4402 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4403
4404 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4405 Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
4406 result_reg, _slow_path);
4407 Node* normal_ctl = control();
4408 Node* no_array_ctl = result_reg->in(_slow_path);
4409
4410 // Generate code for the slow case. We make a call to newArray().
4411 set_control(no_array_ctl);
4412 if (!stopped()) {
4413 // Either the input type is void.class, or else the
4414 // array klass has not yet been cached. Either the
4415 // ensuing call will throw an exception, or else it
4416 // will cache the array klass for next time.
4417 PreserveJVMState pjvms(this);
4418 CallJavaNode* slow_call = nullptr;
4419 if (uninitialized) {
4420 // Generate optimized virtual call (holder class 'Unsafe' is final)
4421 slow_call = generate_method_call(vmIntrinsics::_allocateUninitializedArray, false, false, true);
4422 } else {
4423 slow_call = generate_method_call_static(vmIntrinsics::_newArray, true);
4424 }
4425 Node* slow_result = set_results_for_java_call(slow_call);
4426 // this->control() comes from set_results_for_java_call
4427 result_reg->set_req(_slow_path, control());
4428 result_val->set_req(_slow_path, slow_result);
4429 result_io ->set_req(_slow_path, i_o());
4430 result_mem->set_req(_slow_path, reset_memory());
4431 }
4432
4433 set_control(normal_ctl);
4434 if (!stopped()) {
4435 // Normal case: The array type has been cached in the java.lang.Class.
4436 // The following call works fine even if the array type is polymorphic.
4437 // It could be a dynamic mix of int[], boolean[], Object[], etc.
4438 Node* obj = new_array(klass_node, count_val, 0); // no arguments to push
4439 result_reg->init_req(_normal_path, control());
4440 result_val->init_req(_normal_path, obj);
4441 result_io ->init_req(_normal_path, i_o());
4442 result_mem->init_req(_normal_path, reset_memory());
4443
4444 if (uninitialized) {
4445       // Mark the allocation as complete so that zeroing is skipped; the caller takes responsibility for the uninitialized contents
4446 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(obj);
4447 alloc->maybe_set_complete(&_gvn);
4448 }
4449 }
4450
4451 // Return the combined state.
4452 set_i_o( _gvn.transform(result_io) );
4453 set_all_memory( _gvn.transform(result_mem));
4454
4455 C->set_has_split_ifs(true); // Has chance for split-if optimization
4456 set_result(result_reg, result_val);
4457 return true;
4458 }
4459
4460 //----------------------inline_native_getLength--------------------------
4461 // public static native int java.lang.reflect.Array.getLength(Object array);
4462 bool LibraryCallKit::inline_native_getLength() {
4463 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
4464
4465 Node* array = null_check(argument(0));
4466 // If array is dead, only null-path is taken.
4467 if (stopped()) return true;
4468
4469 // Deoptimize if it is a non-array.
4470 Node* non_array = generate_non_array_guard(load_object_klass(array), nullptr, &array);
4471
4472 if (non_array != nullptr) {
4473 PreserveJVMState pjvms(this);
4474 set_control(non_array);
4475 uncommon_trap(Deoptimization::Reason_intrinsic,
4476 Deoptimization::Action_maybe_recompile);
4477 }
4478
4479 // If control is dead, only non-array-path is taken.
4480 if (stopped()) return true;
4481
4482   // This works fine even if the array type is polymorphic.
4483 // It could be a dynamic mix of int[], boolean[], Object[], etc.
4484 Node* result = load_array_length(array);
4485
4486 C->set_has_split_ifs(true); // Has chance for split-if optimization
4487 set_result(result);
4488 return true;
4489 }
4490
4491 //------------------------inline_array_copyOf----------------------------
4492 // public static <T,U> T[] java.util.Arrays.copyOf( U[] original, int newLength, Class<? extends T[]> newType);
4493 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from, int to, Class<? extends T[]> newType);
4494 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
4495 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
4496
4497 // Get the arguments.
4498 Node* original = argument(0);
4499 Node* start = is_copyOfRange? argument(1): intcon(0);
4500 Node* end = is_copyOfRange? argument(2): argument(1);
4501 Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
4502
4503 Node* newcopy = nullptr;
4504
4505 // Set the original stack and the reexecute bit for the interpreter to reexecute
4506 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4507 { PreserveReexecuteState preexecs(this);
4508 jvms()->set_should_reexecute(true);
4509
4510 array_type_mirror = null_check(array_type_mirror);
4511 original = null_check(original);
4512
4513 // Check if a null path was taken unconditionally.
4514 if (stopped()) return true;
4515
4516 Node* orig_length = load_array_length(original);
4517
4518 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4519 klass_node = null_check(klass_node);
4520
4521 RegionNode* bailout = new RegionNode(1);
4522 record_for_igvn(bailout);
4523
4524 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4525 // Bail out if that is so.
4526 Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
4527 if (not_objArray != nullptr) {
4528 // Improve the klass node's type from the new optimistic assumption:
4529 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4530 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
4531 Node* cast = new CastPPNode(control(), klass_node, akls);
4532 klass_node = _gvn.transform(cast);
4533 }
4534
4535 // Bail out if either start or end is negative.
4536 generate_negative_guard(start, bailout, &start);
4537 generate_negative_guard(end, bailout, &end);
4538
4539 Node* length = end;
4540 if (_gvn.type(start) != TypeInt::ZERO) {
4541 length = _gvn.transform(new SubINode(end, start));
4542 }
4543
4544 // Bail out if length is negative (i.e., if start > end).
4545     // Without this, new_array would throw
4546     // NegativeArraySizeException, but IllegalArgumentException is what
4547     // should be thrown.
4548 generate_negative_guard(length, bailout, &length);
4549
4550 // Bail out if start is larger than the original length
4551 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4552 generate_negative_guard(orig_tail, bailout, &orig_tail);
4553
4554 if (bailout->req() > 1) {
4555 PreserveJVMState pjvms(this);
4556 set_control(_gvn.transform(bailout));
4557 uncommon_trap(Deoptimization::Reason_intrinsic,
4558 Deoptimization::Action_maybe_recompile);
4559 }
4560
4561 if (!stopped()) {
4562 // How many elements will we copy from the original?
4563 // The answer is MinI(orig_tail, length).
4564 Node* moved = _gvn.transform(new MinINode(orig_tail, length));
4565
4566 // Generate a direct call to the right arraycopy function(s).
4567 // We know the copy is disjoint but we might not know if the
4568 // oop stores need checking.
4569 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
4570 // This will fail a store-check if x contains any non-nulls.
4571
4572 // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
4573 // loads/stores but it is legal only if we're sure the
4574 // Arrays.copyOf would succeed. So we need all input arguments
4575 // to the copyOf to be validated, including that the copy to the
4576 // new array won't trigger an ArrayStoreException. That subtype
4577 // check can be optimized if we know something on the type of
4578 // the input array from type speculation.
4579 if (_gvn.type(klass_node)->singleton()) {
4580 const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4581 const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4582
4583 int test = C->static_subtype_check(superk, subk);
4584 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4585 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4586 if (t_original->speculative_type() != nullptr) {
4587 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4588 }
4589 }
4590 }
4591
4592 bool validated = false;
4593 // Reason_class_check rather than Reason_intrinsic because we
4594 // want to intrinsify even if this traps.
4595 if (!too_many_traps(Deoptimization::Reason_class_check)) {
4596 Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4597
4598 if (not_subtype_ctrl != top()) {
4599 PreserveJVMState pjvms(this);
4600 set_control(not_subtype_ctrl);
4601 uncommon_trap(Deoptimization::Reason_class_check,
4602 Deoptimization::Action_make_not_entrant);
4603 assert(stopped(), "Should be stopped");
4604 }
4605 validated = true;
4606 }
4607
4608 if (!stopped()) {
4609 newcopy = new_array(klass_node, length, 0); // no arguments to push
4610
4611 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
4612 load_object_klass(original), klass_node);
4613 if (!is_copyOfRange) {
4614 ac->set_copyof(validated);
4615 } else {
4616 ac->set_copyofrange(validated);
4617 }
4618 Node* n = _gvn.transform(ac);
4619 if (n == ac) {
4620 ac->connect_outputs(this);
4621 } else {
4622 assert(validated, "shouldn't transform if all arguments not validated");
4623 set_all_memory(n);
4624 }
4625 }
4626 }
4627 } // original reexecute is set back here
4628
4629 C->set_has_split_ifs(true); // Has chance for split-if optimization
4630 if (!stopped()) {
4631 set_result(newcopy);
4632 }
4633 return true;
4634 }
4635
4636
4637 //----------------------generate_virtual_guard---------------------------
4638 // Helper for hashCode and clone. Peeks inside the vtable to avoid a call.
4639 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
4640 RegionNode* slow_region) {
4641 ciMethod* method = callee();
4642 int vtable_index = method->vtable_index();
4643 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4644 "bad index %d", vtable_index);
4645 // Get the Method* out of the appropriate vtable entry.
4646 int entry_offset = in_bytes(Klass::vtable_start_offset()) +
4647 vtable_index*vtableEntry::size_in_bytes() +
4648 in_bytes(vtableEntry::method_offset());
4649 Node* entry_addr = basic_plus_adr(top(), obj_klass, entry_offset);
4650 Node* target_call = make_load(nullptr, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
4651
4652 // Compare the target method with the expected method (e.g., Object.hashCode).
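  // If they differ, the method has been overridden and the guard sends
  // control to the slow path.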
4653 const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
4654
4655 Node* native_call = makecon(native_call_addr);
4656 Node* chk_native = _gvn.transform(new CmpPNode(target_call, native_call));
4657 Node* test_native = _gvn.transform(new BoolNode(chk_native, BoolTest::ne));
4658
4659 return generate_slow_guard(test_native, slow_region);
4660 }
4661
4662 //-----------------------generate_method_call----------------------------
4663 // Use generate_method_call to make a slow-call to the real
4664 // method if the fast path fails. An alternative would be to
4665 // use a stub like OptoRuntime::slow_arraycopy_Java.
4666 // This only works for expanding the current library call,
4667 // not another intrinsic. (E.g., don't use this for making an
4668 // arraycopy call inside of the copyOf intrinsic.)
4669 CallJavaNode*
4670 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4671 // When compiling the intrinsic method itself, do not use this technique.
4672 guarantee(callee() != C->method(), "cannot make slow-call to self");
4673
4674 ciMethod* method = callee();
4675 // ensure the JVMS we have will be correct for this call
4676 guarantee(method_id == method->intrinsic_id(), "must match");
4677
4678 const TypeFunc* tf = TypeFunc::make(method);
4679 if (res_not_null) {
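    // Narrow the declared return type to non-null so the caller of the slow
    // call does not need another null check on the result.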
4680 assert(tf->return_type() == T_OBJECT, "");
4681 const TypeTuple* range = tf->range();
4682 const Type** fields = TypeTuple::fields(range->cnt());
4683 fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4684 const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4685 tf = TypeFunc::make(tf->domain(), new_range);
4686 }
4687 CallJavaNode* slow_call;
4688 if (is_static) {
4689 assert(!is_virtual, "");
4690 slow_call = new CallStaticJavaNode(C, tf,
4691 SharedRuntime::get_resolve_static_call_stub(), method);
4692 } else if (is_virtual) {
4693 assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4694 int vtable_index = Method::invalid_vtable_index;
4695 if (UseInlineCaches) {
4696       // Suppress the vtable call: with inline caches, the resolve stub handles the dispatch.
4697 } else {
4698       // hashCode and clone are not miranda methods,
4699 // so the vtable index is fixed.
4700 // No need to use the linkResolver to get it.
4701 vtable_index = method->vtable_index();
4702 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4703 "bad index %d", vtable_index);
4704 }
4705 slow_call = new CallDynamicJavaNode(tf,
4706 SharedRuntime::get_resolve_virtual_call_stub(),
4707 method, vtable_index);
4708 } else { // neither virtual nor static: opt_virtual
4709 assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4710 slow_call = new CallStaticJavaNode(C, tf,
4711 SharedRuntime::get_resolve_opt_virtual_call_stub(), method);
4712 slow_call->set_optimized_virtual(true);
4713 }
4714 if (CallGenerator::is_inlined_method_handle_intrinsic(this->method(), bci(), callee())) {
4715 // To be able to issue a direct call (optimized virtual or virtual)
4716 // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
4717 // about the method being invoked should be attached to the call site to
4718 // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
4719 slow_call->set_override_symbolic_info(true);
4720 }
4721 set_arguments_for_java_call(slow_call);
4722 set_edges_for_java_call(slow_call);
4723 return slow_call;
4724 }
4725
4726
4727 /**
4728 * Build special case code for calls to hashCode on an object. This call may
4729 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4730 * slightly different code.
4731 */
4732 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4733 assert(is_static == callee()->is_static(), "correct intrinsic selection");
4734 assert(!(is_virtual && is_static), "either virtual, special, or static");
4735
4736 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4737
4738 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4739 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
4740 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4741 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4742 Node* obj = nullptr;
4743 if (!is_static) {
4744 // Check for hashing null object
4745 obj = null_check_receiver();
4746 if (stopped()) return true; // unconditionally null
4747 result_reg->init_req(_null_path, top());
4748 result_val->init_req(_null_path, top());
4749 } else {
4750 // Do a null check, and return zero if null.
4751 // System.identityHashCode(null) == 0
4752 obj = argument(0);
4753 Node* null_ctl = top();
4754 obj = null_check_oop(obj, &null_ctl);
4755 result_reg->init_req(_null_path, null_ctl);
4756 result_val->init_req(_null_path, _gvn.intcon(0));
4757 }
4758
4759 // Unconditionally null? Then return right away.
4760 if (stopped()) {
4761 set_control( result_reg->in(_null_path));
4762 if (!stopped())
4763 set_result(result_val->in(_null_path));
4764 return true;
4765 }
4766
4767 // We only go to the fast case code if we pass a number of guards. The
4768 // paths which do not pass are accumulated in the slow_region.
4769 RegionNode* slow_region = new RegionNode(1);
4770 record_for_igvn(slow_region);
4771
4772 // If this is a virtual call, we generate a funny guard. We pull out
4773 // the vtable entry corresponding to hashCode() from the target object.
4774 // If the target method which we are calling happens to be the native
4775 // Object hashCode() method, we pass the guard. We do not need this
4776 // guard for non-virtual calls -- the caller is known to be the native
4777 // Object hashCode().
4778 if (is_virtual) {
4779 // After null check, get the object's klass.
4780 Node* obj_klass = load_object_klass(obj);
4781 generate_virtual_guard(obj_klass, slow_region);
4782 }
4783
4784 // Get the header out of the object, use LoadMarkNode when available
4785 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4786 // The control of the load must be null. Otherwise, the load can move before
4787 // the null check after castPP removal.
4788 Node* no_ctrl = nullptr;
4789 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4790
4791 if (!UseObjectMonitorTable) {
4792     // Test the header to see if it is safe to read w.r.t. locking: a monitor-locked object keeps its hash in the displaced header, so it must take the slow path.
4793 Node *lock_mask = _gvn.MakeConX(markWord::lock_mask_in_place);
4794 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4795 Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
4796 Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4797 Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4798
4799 generate_slow_guard(test_monitor, slow_region);
4800 }
4801
4802 // Get the hash value and check to see that it has been properly assigned.
4803 // We depend on hash_mask being at most 32 bits and avoid the use of
4804 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4805 // vm: see markWord.hpp.
4806 Node *hash_mask = _gvn.intcon(markWord::hash_mask);
4807 Node *hash_shift = _gvn.intcon(markWord::hash_shift);
4808   Node *hshifted_header = _gvn.transform(new URShiftXNode(header, hash_shift));
4809 // This hack lets the hash bits live anywhere in the mark object now, as long
4810 // as the shift drops the relevant bits into the low 32 bits. Note that
4811 // Java spec says that HashCode is an int so there's no point in capturing
4812 // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4813 hshifted_header = ConvX2I(hshifted_header);
4814 Node *hash_val = _gvn.transform(new AndINode(hshifted_header, hash_mask));
4815
4816 Node *no_hash_val = _gvn.intcon(markWord::no_hash);
4817 Node *chk_assigned = _gvn.transform(new CmpINode( hash_val, no_hash_val));
4818 Node *test_assigned = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
4819
4820 generate_slow_guard(test_assigned, slow_region);
4821
4822 Node* init_mem = reset_memory();
4823 // fill in the rest of the null path:
4824 result_io ->init_req(_null_path, i_o());
4825 result_mem->init_req(_null_path, init_mem);
4826
4827 result_val->init_req(_fast_path, hash_val);
4828 result_reg->init_req(_fast_path, control());
4829 result_io ->init_req(_fast_path, i_o());
4830 result_mem->init_req(_fast_path, init_mem);
4831
4832 // Generate code for the slow case. We make a call to hashCode().
4833 set_control(_gvn.transform(slow_region));
4834 if (!stopped()) {
4835 // No need for PreserveJVMState, because we're using up the present state.
4836 set_all_memory(init_mem);
4837 vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
4838 CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static, false);
4839 Node* slow_result = set_results_for_java_call(slow_call);
4840 // this->control() comes from set_results_for_java_call
4841 result_reg->init_req(_slow_path, control());
4842 result_val->init_req(_slow_path, slow_result);
4843 result_io ->set_req(_slow_path, i_o());
4844 result_mem ->set_req(_slow_path, reset_memory());
4845 }
4846
4847 // Return the combined state.
4848 set_i_o( _gvn.transform(result_io) );
4849 set_all_memory( _gvn.transform(result_mem));
4850
4851 set_result(result_reg, result_val);
4852 return true;
4853 }
4854
4855 //---------------------------inline_native_getClass----------------------------
4856 // public final native Class<?> java.lang.Object.getClass();
4857 //
4858 // Build special case code for calls to getClass on an object.
4859 bool LibraryCallKit::inline_native_getClass() {
4860 Node* obj = null_check_receiver();
4861 if (stopped()) return true;
4862 set_result(load_mirror_from_klass(load_object_klass(obj)));
4863 return true;
4864 }
4865
4866 //-----------------inline_native_Reflection_getCallerClass---------------------
4867 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4868 //
4869 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4870 //
4871 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4872 // in that it must skip particular security frames and checks for
4873 // caller sensitive methods.
4874 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4875 #ifndef PRODUCT
4876 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4877 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4878 }
4879 #endif
4880
4881 if (!jvms()->has_method()) {
4882 #ifndef PRODUCT
4883 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4884 tty->print_cr(" Bailing out because intrinsic was inlined at top level");
4885 }
4886 #endif
4887 return false;
4888 }
4889
4890 // Walk back up the JVM state to find the caller at the required
4891 // depth.
4892 JVMState* caller_jvms = jvms();
4893
4894 // Cf. JVM_GetCallerClass
4895 // NOTE: Start the loop at depth 1 because the current JVM state does
4896 // not include the Reflection.getCallerClass() frame.
4897 for (int n = 1; caller_jvms != nullptr; caller_jvms = caller_jvms->caller(), n++) {
4898 ciMethod* m = caller_jvms->method();
4899 switch (n) {
4900 case 0:
4901 fatal("current JVM state does not include the Reflection.getCallerClass frame");
4902 break;
4903 case 1:
4904 // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
4905 if (!m->caller_sensitive()) {
4906 #ifndef PRODUCT
4907 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4908 tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n);
4909 }
4910 #endif
4911 return false; // bail-out; let JVM_GetCallerClass do the work
4912 }
4913 break;
4914 default:
4915 if (!m->is_ignored_by_security_stack_walk()) {
4916 // We have reached the desired frame; return the holder class.
4917 // Acquire method holder as java.lang.Class and push as constant.
4918 ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
4919 ciInstance* caller_mirror = caller_klass->java_mirror();
4920 set_result(makecon(TypeInstPtr::make(caller_mirror)));
4921
4922 #ifndef PRODUCT
4923 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4924 tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
4925 tty->print_cr(" JVM state at this point:");
4926 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4927 ciMethod* m = jvms()->of_depth(i)->method();
4928 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4929 }
4930 }
4931 #endif
4932 return true;
4933 }
4934 break;
4935 }
4936 }
4937
4938 #ifndef PRODUCT
4939 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4940 tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
4941 tty->print_cr(" JVM state at this point:");
4942 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4943 ciMethod* m = jvms()->of_depth(i)->method();
4944 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4945 }
4946 }
4947 #endif
4948
4949 return false; // bail-out; let JVM_GetCallerClass do the work
4950 }
4951
4952 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
4953 Node* arg = argument(0);
4954 Node* result = nullptr;
4955
4956 switch (id) {
4957 case vmIntrinsics::_floatToRawIntBits: result = new MoveF2INode(arg); break;
4958 case vmIntrinsics::_intBitsToFloat: result = new MoveI2FNode(arg); break;
4959 case vmIntrinsics::_doubleToRawLongBits: result = new MoveD2LNode(arg); break;
4960 case vmIntrinsics::_longBitsToDouble: result = new MoveL2DNode(arg); break;
4961 case vmIntrinsics::_floatToFloat16: result = new ConvF2HFNode(arg); break;
4962 case vmIntrinsics::_float16ToFloat: result = new ConvHF2FNode(arg); break;
4963
4964 case vmIntrinsics::_doubleToLongBits: {
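    // Unlike doubleToRawLongBits, doubleToLongBits must collapse every NaN
    // to the canonical quiet NaN bit pattern. arg != arg is true exactly
    // for NaN, so branch on that and return the canonical bits on the NaN
    // path.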
4965     // two paths (plus control) merge in a region
4966 RegionNode *r = new RegionNode(3);
4967 Node *phi = new PhiNode(r, TypeLong::LONG);
4968
4969 Node *cmpisnan = _gvn.transform(new CmpDNode(arg, arg));
4970 // Build the boolean node
4971 Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
4972
4973 // Branch either way.
4974 // NaN case is less traveled, which makes all the difference.
4975 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4976 Node *opt_isnan = _gvn.transform(ifisnan);
4977 assert( opt_isnan->is_If(), "Expect an IfNode");
4978 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4979 Node *iftrue = _gvn.transform(new IfTrueNode(opt_ifisnan));
4980
4981 set_control(iftrue);
4982
4983 static const jlong nan_bits = CONST64(0x7ff8000000000000);
4984 Node *slow_result = longcon(nan_bits); // return NaN
4985 phi->init_req(1, _gvn.transform( slow_result ));
4986 r->init_req(1, iftrue);
4987
4988 // Else fall through
4989 Node *iffalse = _gvn.transform(new IfFalseNode(opt_ifisnan));
4990 set_control(iffalse);
4991
4992 phi->init_req(2, _gvn.transform(new MoveD2LNode(arg)));
4993 r->init_req(2, iffalse);
4994
4995 // Post merge
4996 set_control(_gvn.transform(r));
4997 record_for_igvn(r);
4998
4999 C->set_has_split_ifs(true); // Has chance for split-if optimization
5000 result = phi;
5001 assert(result->bottom_type()->isa_long(), "must be");
5002 break;
5003 }
5004
5005 case vmIntrinsics::_floatToIntBits: {
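    // As with doubleToLongBits above: floatToIntBits collapses all NaNs to
    // the canonical quiet NaN 0x7fc00000, so branch on arg != arg.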
5006     // two paths (plus control) merge in a region
5007 RegionNode *r = new RegionNode(3);
5008 Node *phi = new PhiNode(r, TypeInt::INT);
5009
5010 Node *cmpisnan = _gvn.transform(new CmpFNode(arg, arg));
5011 // Build the boolean node
5012 Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
5013
5014 // Branch either way.
5015 // NaN case is less traveled, which makes all the difference.
5016 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
5017 Node *opt_isnan = _gvn.transform(ifisnan);
5018 assert( opt_isnan->is_If(), "Expect an IfNode");
5019 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
5020 Node *iftrue = _gvn.transform(new IfTrueNode(opt_ifisnan));
5021
5022 set_control(iftrue);
5023
5024 static const jint nan_bits = 0x7fc00000;
5025 Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN
5026 phi->init_req(1, _gvn.transform( slow_result ));
5027 r->init_req(1, iftrue);
5028
5029 // Else fall through
5030 Node *iffalse = _gvn.transform(new IfFalseNode(opt_ifisnan));
5031 set_control(iffalse);
5032
5033 phi->init_req(2, _gvn.transform(new MoveF2INode(arg)));
5034 r->init_req(2, iffalse);
5035
5036 // Post merge
5037 set_control(_gvn.transform(r));
5038 record_for_igvn(r);
5039
5040 C->set_has_split_ifs(true); // Has chance for split-if optimization
5041 result = phi;
5042 assert(result->bottom_type()->isa_int(), "must be");
5043 break;
5044 }
5045
5046 default:
5047 fatal_unexpected_iid(id);
5048 break;
5049 }
5050 set_result(_gvn.transform(result));
5051 return true;
5052 }
5053
5054 bool LibraryCallKit::inline_fp_range_check(vmIntrinsics::ID id) {
5055 Node* arg = argument(0);
5056 Node* result = nullptr;
5057
5058 switch (id) {
5059 case vmIntrinsics::_floatIsInfinite:
5060 result = new IsInfiniteFNode(arg);
5061 break;
5062 case vmIntrinsics::_floatIsFinite:
5063 result = new IsFiniteFNode(arg);
5064 break;
5065 case vmIntrinsics::_doubleIsInfinite:
5066 result = new IsInfiniteDNode(arg);
5067 break;
5068 case vmIntrinsics::_doubleIsFinite:
5069 result = new IsFiniteDNode(arg);
5070 break;
5071 default:
5072 fatal_unexpected_iid(id);
5073 break;
5074 }
5075 set_result(_gvn.transform(result));
5076 return true;
5077 }
5078
5079 //----------------------inline_unsafe_copyMemory-------------------------
5080 // public native void Unsafe.copyMemory0(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
5081
5082 static bool has_wide_mem(PhaseGVN& gvn, Node* addr, Node* base) {
5083 const TypeAryPtr* addr_t = gvn.type(addr)->isa_aryptr();
5084 const Type* base_t = gvn.type(base);
5085
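  // Classify the access by the type of 'base':
  //   base is always null -> off-heap (native) access
  //   base is never null  -> on-heap access through an oop
  //   base may be null    -> mixed access; treat conservatively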
5086 bool in_native = (base_t == TypePtr::NULL_PTR);
5087 bool in_heap = !TypePtr::NULL_PTR->higher_equal(base_t);
5088 bool is_mixed = !in_heap && !in_native;
5089
5090 if (is_mixed) {
5091 return true; // mixed accesses can touch both on-heap and off-heap memory
5092 }
5093 if (in_heap) {
5094 bool is_prim_array = (addr_t != nullptr) && (addr_t->elem() != Type::BOTTOM);
5095 if (!is_prim_array) {
      // Though Unsafe.copyMemory() ensures at runtime that, for on-heap accesses, base is a
      // primitive array, there's not enough type information available to determine the
      // proper memory slice for it.
5098 return true;
5099 }
5100 }
5101 return false;
5102 }
5103
5104 bool LibraryCallKit::inline_unsafe_copyMemory() {
5105 if (callee()->is_static()) return false; // caller must have the capability!
5106 null_check_receiver(); // null-check receiver
5107 if (stopped()) return true;
5108
5109 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
5110
5111 Node* src_base = argument(1); // type: oop
5112 Node* src_off = ConvL2X(argument(2)); // type: long
5113 Node* dst_base = argument(4); // type: oop
5114 Node* dst_off = ConvL2X(argument(5)); // type: long
5115 Node* size = ConvL2X(argument(7)); // type: long
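  // Slot 0 is the Unsafe receiver; each long argument occupies two slots,
  // which is why the offsets and the size are read from slots 2, 5 and 7.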
5116
5117 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
5118 "fieldOffset must be byte-scaled");
5119
5120 Node* src_addr = make_unsafe_address(src_base, src_off);
5121 Node* dst_addr = make_unsafe_address(dst_base, dst_off);
5122
5123 Node* thread = _gvn.transform(new ThreadLocalNode());
5124 Node* doing_unsafe_access_addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::doing_unsafe_access_offset()));
5125 BasicType doing_unsafe_access_bt = T_BYTE;
5126 assert((sizeof(bool) * CHAR_BIT) == 8, "not implemented");
5127
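  // Raise the thread-local doing_unsafe_access flag around the copy so that
  // a hardware fault on the raw memory (e.g. a truncated memory-mapped file)
  // is turned into a Java InternalError instead of crashing the VM.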
5128 // update volatile field
5129 store_to_memory(control(), doing_unsafe_access_addr, intcon(1), doing_unsafe_access_bt, MemNode::unordered);
5130
5131 int flags = RC_LEAF | RC_NO_FP;
5132
5133 const TypePtr* dst_type = TypePtr::BOTTOM;
5134
5135 // Adjust memory effects of the runtime call based on input values.
5136 if (!has_wide_mem(_gvn, src_addr, src_base) &&
5137 !has_wide_mem(_gvn, dst_addr, dst_base)) {
5138 dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5139
5140 const TypePtr* src_type = _gvn.type(src_addr)->is_ptr();
5141 if (C->get_alias_index(src_type) == C->get_alias_index(dst_type)) {
5142 flags |= RC_NARROW_MEM; // narrow in memory
5143 }
5144 }
5145
5146 // Call it. Note that the length argument is not scaled.
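  // XTOP supplies the empty second argument slot of the 64-bit 'size'
  // value on LP64 platforms.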
5147 make_runtime_call(flags,
5148 OptoRuntime::fast_arraycopy_Type(),
5149 StubRoutines::unsafe_arraycopy(),
5150 "unsafe_arraycopy",
5151 dst_type,
5152 src_addr, dst_addr, size XTOP);
5153
5154 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, MemNode::unordered);
5155
5156 return true;
5157 }
5158
// public native void Unsafe.setMemory0(Object base, long offset, long bytes, byte value);
// Fill 'bytes' bytes starting at 'base[offset]' with 'value'.
5161 bool LibraryCallKit::inline_unsafe_setMemory() {
5162 if (callee()->is_static()) return false; // caller must have the capability!
5163 null_check_receiver(); // null-check receiver
5164 if (stopped()) return true;
5165
5166 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
5167
5168 Node* dst_base = argument(1); // type: oop
5169 Node* dst_off = ConvL2X(argument(2)); // type: long
5170 Node* size = ConvL2X(argument(4)); // type: long
5171 Node* byte = argument(6); // type: byte
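  // Slot 0 is the Unsafe receiver; the two long arguments each occupy two
  // slots, so 'size' and 'byte' are read from slots 4 and 6.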
5172
5173 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
5174 "fieldOffset must be byte-scaled");
5175
5176 Node* dst_addr = make_unsafe_address(dst_base, dst_off);
5177
5178 Node* thread = _gvn.transform(new ThreadLocalNode());
5179 Node* doing_unsafe_access_addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::doing_unsafe_access_offset()));
5180 BasicType doing_unsafe_access_bt = T_BYTE;
5181 assert((sizeof(bool) * CHAR_BIT) == 8, "not implemented");
5182
5183 // update volatile field
5184 store_to_memory(control(), doing_unsafe_access_addr, intcon(1), doing_unsafe_access_bt, MemNode::unordered);
5185
5186 int flags = RC_LEAF | RC_NO_FP;
5187
5188 const TypePtr* dst_type = TypePtr::BOTTOM;
5189
5190 // Adjust memory effects of the runtime call based on input values.
5191 if (!has_wide_mem(_gvn, dst_addr, dst_base)) {
5192 dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5193
5194 flags |= RC_NARROW_MEM; // narrow in memory
5195 }
5196
5197 // Call it. Note that the length argument is not scaled.
5198 make_runtime_call(flags,
5199 OptoRuntime::unsafe_setmemory_Type(),
5200 StubRoutines::unsafe_setmemory(),
5201 "unsafe_setmemory",
5202 dst_type,
5203 dst_addr, size XTOP, byte);
5204
5205 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, MemNode::unordered);
5206
5207 return true;
5208 }
5209
5210 #undef XTOP
5211
//------------------------copy_to_clone----------------------------------
5213 // Helper function for inline_native_clone.
5214 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
  assert(obj_size != nullptr, "object size must be known");
  Node* raw_obj = alloc_obj->in(1);
  assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(),
         "unexpected shape of the allocation result");
5218
5219 AllocateNode* alloc = nullptr;
5220 if (ReduceBulkZeroing &&
5221 // If we are implementing an array clone without knowing its source type
5222 // (can happen when compiling the array-guarded branch of a reflective
5223 // Object.clone() invocation), initialize the array within the allocation.
5224 // This is needed because some GCs (e.g. ZGC) might fall back in this case
5225 // to a runtime clone call that assumes fully initialized source arrays.
5226 (!is_array || obj->get_ptr_type()->isa_aryptr() != nullptr)) {
5227 // We will be completely responsible for initializing this object -
5228 // mark Initialize node as complete.
5229 alloc = AllocateNode::Ideal_allocation(alloc_obj);
    // The object was just allocated - there should not be any stores!
5231 guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
5232 // Mark as complete_with_arraycopy so that on AllocateNode
5233 // expansion, we know this AllocateNode is initialized by an array
5234 // copy and a StoreStore barrier exists after the array copy.
5235 alloc->initialization()->set_complete_with_arraycopy();
5236 }
5237
5238 Node* size = _gvn.transform(obj_size);
5239 access_clone(obj, alloc_obj, size, is_array);
5240
5241 // Do not let reads from the cloned object float above the arraycopy.
5242 if (alloc != nullptr) {
5243 // Do not let stores that initialize this object be reordered with
5244 // a subsequent store that would make this object accessible by
5245 // other threads.
5246 // Record what AllocateNode this StoreStore protects so that
5247 // escape analysis can go from the MemBarStoreStoreNode to the
5248 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
5249 // based on the escape status of the AllocateNode.
5250 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
5251 } else {
5252 insert_mem_bar(Op_MemBarCPUOrder);
5253 }
5254 }
5255
5256 //------------------------inline_native_clone----------------------------
5257 // protected native Object java.lang.Object.clone();
5258 //
5259 // Here are the simple edge cases:
5260 // null receiver => normal trap
5261 // virtual and clone was overridden => slow path to out-of-line clone
5262 // not cloneable or finalizer => slow path to out-of-line Object.clone
5263 //
5264 // The general case has two steps, allocation and copying.
5265 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5266 //
5267 // Copying also has two cases, oop arrays and everything else.
5268 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5269 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5270 //
5271 // These steps fold up nicely if and when the cloned object's klass
5272 // can be sharply typed as an object array, a type array, or an instance.
5273 //
5274 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5275 PhiNode* result_val;
5276
5277 // Set the reexecute bit for the interpreter to reexecute
5278 // the bytecode that invokes Object.clone if deoptimization happens.
5279 { PreserveReexecuteState preexecs(this);
5280 jvms()->set_should_reexecute(true);
5281
5282 Node* obj = null_check_receiver();
5283 if (stopped()) return true;
5284
5285 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5286
5287 // If we are going to clone an instance, we need its exact type to
5288 // know the number and types of fields to convert the clone to
5289 // loads/stores. Maybe a speculative type can help us.
5290 if (!obj_type->klass_is_exact() &&
5291 obj_type->speculative_type() != nullptr &&
5292 obj_type->speculative_type()->is_instance_klass()) {
5293 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5294 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5295 !spec_ik->has_injected_fields()) {
5296 if (!obj_type->isa_instptr() ||
5297 obj_type->is_instptr()->instance_klass()->has_subklass()) {
5298 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5299 }
5300 }
5301 }
5302
5303 // Conservatively insert a memory barrier on all memory slices.
5304 // Do not let writes into the original float below the clone.
5305 insert_mem_bar(Op_MemBarCPUOrder);
5306
5307 // paths into result_reg:
5308 enum {
5309 _slow_path = 1, // out-of-line call to clone method (virtual or not)
5310 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
5311 _array_path, // plain array allocation, plus arrayof_long_arraycopy
5312 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
5313 PATH_LIMIT
5314 };
5315 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5316 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5317 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
5318 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5319 record_for_igvn(result_reg);
5320
5321 Node* obj_klass = load_object_klass(obj);
5322 Node* array_obj = obj;
5323 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr, &array_obj);
5324 if (array_ctl != nullptr) {
5325 // It's an array.
5326 PreserveJVMState pjvms(this);
5327 set_control(array_ctl);
5328 Node* obj_length = load_array_length(array_obj);
5329 Node* array_size = nullptr; // Size of the array without object alignment padding.
5330 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5331
5332 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5333 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5334 // If it is an oop array, it requires very special treatment,
5335 // because gc barriers are required when accessing the array.
5336 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5337 if (is_obja != nullptr) {
5338 PreserveJVMState pjvms2(this);
5339 set_control(is_obja);
5340 // Generate a direct call to the right arraycopy function(s).
5341 // Clones are always tightly coupled.
5342 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, array_obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5343 ac->set_clone_oop_array();
5344 Node* n = _gvn.transform(ac);
5345 assert(n == ac, "cannot disappear");
5346 ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5347
5348 result_reg->init_req(_objArray_path, control());
5349 result_val->init_req(_objArray_path, alloc_obj);
5350 result_i_o ->set_req(_objArray_path, i_o());
5351 result_mem ->set_req(_objArray_path, reset_memory());
5352 }
5353 }
5354 // Otherwise, there are no barriers to worry about.
5355 // (We can dispense with card marks if we know the allocation
5356 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
5357 // causes the non-eden paths to take compensating steps to
5358 // simulate a fresh allocation, so that no further
5359 // card marks are required in compiled code to initialize
5360 // the object.)
5361
5362 if (!stopped()) {
5363 copy_to_clone(array_obj, alloc_obj, array_size, true);
5364
5365 // Present the results of the copy.
5366 result_reg->init_req(_array_path, control());
5367 result_val->init_req(_array_path, alloc_obj);
5368 result_i_o ->set_req(_array_path, i_o());
5369 result_mem ->set_req(_array_path, reset_memory());
5370 }
5371 }
5372
5373 // We only go to the instance fast case code if we pass a number of guards.
5374 // The paths which do not pass are accumulated in the slow_region.
5375 RegionNode* slow_region = new RegionNode(1);
5376 record_for_igvn(slow_region);
5377 if (!stopped()) {
5378 // It's an instance (we did array above). Make the slow-path tests.
    // If this is a virtual call, we generate a dispatch guard: we grab
    // the vtable entry corresponding to clone() from the target object.
5381 // If the target method which we are calling happens to be the
5382 // Object clone() method, we pass the guard. We do not need this
5383 // guard for non-virtual calls; the caller is known to be the native
5384 // Object clone().
5385 if (is_virtual) {
5386 generate_virtual_guard(obj_klass, slow_region);
5387 }
5388
5389 // The object must be easily cloneable and must not have a finalizer.
5390 // Both of these conditions may be checked in a single test.
5391 // We could optimize the test further, but we don't care.
5392 generate_misc_flags_guard(obj_klass,
5393 // Test both conditions:
5394 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
5395 // Must be cloneable but not finalizer:
5396 KlassFlags::_misc_is_cloneable_fast,
5397 slow_region);
5398 }
5399
5400 if (!stopped()) {
5401 // It's an instance, and it passed the slow-path tests.
5402 PreserveJVMState pjvms(this);
5403 Node* obj_size = nullptr; // Total object size, including object alignment padding.
5404 // Need to deoptimize on exception from allocation since Object.clone intrinsic
5405 // is reexecuted if deoptimization occurs and there could be problems when merging
5406 // exception state between multiple Object.clone versions (reexecute=true vs reexecute=false).
5407 Node* alloc_obj = new_instance(obj_klass, nullptr, &obj_size, /*deoptimize_on_exception=*/true);
5408
5409 copy_to_clone(obj, alloc_obj, obj_size, false);
5410
5411 // Present the results of the slow call.
5412 result_reg->init_req(_instance_path, control());
5413 result_val->init_req(_instance_path, alloc_obj);
5414 result_i_o ->set_req(_instance_path, i_o());
5415 result_mem ->set_req(_instance_path, reset_memory());
5416 }
5417
5418 // Generate code for the slow case. We make a call to clone().
5419 set_control(_gvn.transform(slow_region));
5420 if (!stopped()) {
5421 PreserveJVMState pjvms(this);
5422 CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual, false, true);
5423 // We need to deoptimize on exception (see comment above)
5424 Node* slow_result = set_results_for_java_call(slow_call, false, /* deoptimize */ true);
5425 // this->control() comes from set_results_for_java_call
5426 result_reg->init_req(_slow_path, control());
5427 result_val->init_req(_slow_path, slow_result);
5428 result_i_o ->set_req(_slow_path, i_o());
5429 result_mem ->set_req(_slow_path, reset_memory());
5430 }
5431
5432 // Return the combined state.
5433 set_control( _gvn.transform(result_reg));
5434 set_i_o( _gvn.transform(result_i_o));
5435 set_all_memory( _gvn.transform(result_mem));
5436 } // original reexecute is set back here
5437
5438 set_result(_gvn.transform(result_val));
5439 return true;
5440 }
5441
5442 // If we have a tightly coupled allocation, the arraycopy may take care
5443 // of the array initialization. If one of the guards we insert between
5444 // the allocation and the arraycopy causes a deoptimization, an
5445 // uninitialized array will escape the compiled method. To prevent that
5446 // we set the JVM state for uncommon traps between the allocation and
5447 // the arraycopy to the state before the allocation so, in case of
5448 // deoptimization, we'll reexecute the allocation and the
5449 // initialization.
5450 JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp) {
5451 if (alloc != nullptr) {
5452 ciMethod* trap_method = alloc->jvms()->method();
5453 int trap_bci = alloc->jvms()->bci();
5454
5455 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5456 !C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_null_check)) {
5457 // Make sure there's no store between the allocation and the
      // arraycopy; otherwise visible side effects could be re-executed
5459 // in case of deoptimization and cause incorrect execution.
5460 bool no_interfering_store = true;
5461 Node* mem = alloc->in(TypeFunc::Memory);
5462 if (mem->is_MergeMem()) {
5463 for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
5464 Node* n = mms.memory();
5465 if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
5466 assert(n->is_Store(), "what else?");
5467 no_interfering_store = false;
5468 break;
5469 }
5470 }
5471 } else {
5472 for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
5473 Node* n = mms.memory();
5474 if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
5475 assert(n->is_Store(), "what else?");
5476 no_interfering_store = false;
5477 break;
5478 }
5479 }
5480 }
5481
5482 if (no_interfering_store) {
5483 SafePointNode* sfpt = create_safepoint_with_state_before_array_allocation(alloc);
5484
5485 JVMState* saved_jvms = jvms();
5486 saved_reexecute_sp = _reexecute_sp;
5487
5488 set_jvms(sfpt->jvms());
5489 _reexecute_sp = jvms()->sp();
5490
5491 return saved_jvms;
5492 }
5493 }
5494 }
5495 return nullptr;
5496 }
5497
5498 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5499 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5500 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5501 JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5502 uint size = alloc->req();
5503 SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5504 old_jvms->set_map(sfpt);
5505 for (uint i = 0; i < size; i++) {
5506 sfpt->init_req(i, alloc->in(i));
5507 }
5508 // re-push array length for deoptimization
5509 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
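  // The re-pushed length grows the expression stack by one slot, so every
  // offset past the stack (monitors, scalarized objects, end) shifts by one.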
5510 old_jvms->set_sp(old_jvms->sp()+1);
5511 old_jvms->set_monoff(old_jvms->monoff()+1);
5512 old_jvms->set_scloff(old_jvms->scloff()+1);
5513 old_jvms->set_endoff(old_jvms->endoff()+1);
5514 old_jvms->set_should_reexecute(true);
5515
5516 sfpt->set_i_o(map()->i_o());
5517 sfpt->set_memory(map()->memory());
5518 sfpt->set_control(map()->control());
5519 return sfpt;
5520 }
5521
5522 // In case of a deoptimization, we restart execution at the
5523 // allocation, allocating a new array. We would leave an uninitialized
5524 // array in the heap that GCs wouldn't expect. Move the allocation
5525 // after the traps so we don't allocate the array if we
5526 // deoptimize. This is possible because tightly_coupled_allocation()
5527 // guarantees there's no observer of the allocated array at this point
5528 // and the control flow is simple enough.
5529 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5530 int saved_reexecute_sp, uint new_idx) {
5531 if (saved_jvms_before_guards != nullptr && !stopped()) {
5532 replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5533
5534 assert(alloc != nullptr, "only with a tightly coupled allocation");
5535 // restore JVM state to the state at the arraycopy
5536 saved_jvms_before_guards->map()->set_control(map()->control());
5537 assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5538 assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5539 // If we've improved the types of some nodes (null check) while
5540 // emitting the guards, propagate them to the current state
5541 map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5542 set_jvms(saved_jvms_before_guards);
5543 _reexecute_sp = saved_reexecute_sp;
5544
5545 // Remove the allocation from above the guards
5546 CallProjections callprojs;
5547 alloc->extract_projections(&callprojs, true);
5548 InitializeNode* init = alloc->initialization();
5549 Node* alloc_mem = alloc->in(TypeFunc::Memory);
5550 C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5551 init->replace_mem_projs_by(alloc_mem, C);
5552
5553 // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5554 // the allocation (i.e. is only valid if the allocation succeeds):
5555 // 1) replace CastIINode with AllocateArrayNode's length here
5556 // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5557 //
5558 // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
    // a new separate CastIINode (arraycopy guard checks or any array length use between the array allocation and the arraycopy).
5560 Node* init_control = init->proj_out(TypeFunc::Control);
5561 Node* alloc_length = alloc->Ideal_length();
5562 #ifdef ASSERT
5563 Node* prev_cast = nullptr;
5564 #endif
5565 for (uint i = 0; i < init_control->outcnt(); i++) {
5566 Node* init_out = init_control->raw_out(i);
5567 if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5568 #ifdef ASSERT
5569 if (prev_cast == nullptr) {
5570 prev_cast = init_out;
5571 } else {
5572 if (prev_cast->cmp(*init_out) == false) {
5573 prev_cast->dump();
5574 init_out->dump();
5575 assert(false, "not equal CastIINode");
5576 }
5577 }
5578 #endif
5579 C->gvn_replace_by(init_out, alloc_length);
5580 }
5581 }
5582 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5583
5584 // move the allocation here (after the guards)
5585 _gvn.hash_delete(alloc);
5586 alloc->set_req(TypeFunc::Control, control());
5587 alloc->set_req(TypeFunc::I_O, i_o());
5588 Node *mem = reset_memory();
5589 set_all_memory(mem);
5590 alloc->set_req(TypeFunc::Memory, mem);
5591 set_control(init->proj_out_or_null(TypeFunc::Control));
5592 set_i_o(callprojs.fallthrough_ioproj);
5593
5594 // Update memory as done in GraphKit::set_output_for_allocation()
5595 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5596 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5597 if (ary_type->isa_aryptr() && length_type != nullptr) {
5598 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5599 }
5600 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5601 int elemidx = C->get_alias_index(telemref);
5602 // Need to properly move every memory projection for the Initialize
5603 #ifdef ASSERT
5604 int mark_idx = C->get_alias_index(ary_type->add_offset(oopDesc::mark_offset_in_bytes()));
5605 int klass_idx = C->get_alias_index(ary_type->add_offset(oopDesc::klass_offset_in_bytes()));
5606 #endif
5607 auto move_proj = [&](ProjNode* proj) {
5608 int alias_idx = C->get_alias_index(proj->adr_type());
5609 assert(alias_idx == Compile::AliasIdxRaw ||
5610 alias_idx == elemidx ||
5611 alias_idx == mark_idx ||
5612 alias_idx == klass_idx, "should be raw memory or array element type");
5613 set_memory(proj, alias_idx);
5614 };
5615 init->for_each_proj(move_proj, TypeFunc::Memory);
5616
5617 Node* allocx = _gvn.transform(alloc);
5618 assert(allocx == alloc, "where has the allocation gone?");
5619 assert(dest->is_CheckCastPP(), "not an allocation result?");
5620
5621 _gvn.hash_delete(dest);
5622 dest->set_req(0, control());
5623 Node* destx = _gvn.transform(dest);
5624 assert(destx == dest, "where has the allocation result gone?");
5625
5626 array_ideal_length(alloc, ary_type, true);
5627 }
5628 }
5629
5630 // Unrelated UCTs between the array allocation and the array copy, which are considered safe by tightly_coupled_allocation(),
5631 // need to be replaced by an UCT with a state before the array allocation (including the array length). This is necessary
5632 // because we could hit one of these UCTs (which are executed before the emitted array copy guards and the actual array
5633 // allocation which is moved down in arraycopy_move_allocation_here()). When later resuming execution in the interpreter,
5634 // we would have wrongly skipped the array allocation. To prevent this, we resume execution at the array allocation in
5635 // the interpreter similar to what we are doing for the newly emitted guards for the array copy.
5636 void LibraryCallKit::replace_unrelated_uncommon_traps_with_alloc_state(AllocateArrayNode* alloc,
5637 JVMState* saved_jvms_before_guards) {
5638 if (saved_jvms_before_guards->map()->control()->is_IfProj()) {
5639 // There is at least one unrelated uncommon trap which needs to be replaced.
5640 SafePointNode* sfpt = create_safepoint_with_state_before_array_allocation(alloc);
5641
5642 JVMState* saved_jvms = jvms();
5643 const int saved_reexecute_sp = _reexecute_sp;
5644 set_jvms(sfpt->jvms());
5645 _reexecute_sp = jvms()->sp();
5646
5647 replace_unrelated_uncommon_traps_with_alloc_state(saved_jvms_before_guards);
5648
5649 // Restore state
5650 set_jvms(saved_jvms);
5651 _reexecute_sp = saved_reexecute_sp;
5652 }
5653 }
5654
5655 // Replace the unrelated uncommon traps with new uncommon trap nodes by reusing the action and reason. The new uncommon
5656 // traps will have the state of the array allocation. Let the old uncommon trap nodes die.
5657 void LibraryCallKit::replace_unrelated_uncommon_traps_with_alloc_state(JVMState* saved_jvms_before_guards) {
5658 Node* if_proj = saved_jvms_before_guards->map()->control(); // Start the search right before the newly emitted guards
5659 while (if_proj->is_IfProj()) {
5660 CallStaticJavaNode* uncommon_trap = get_uncommon_trap_from_success_proj(if_proj);
5661 if (uncommon_trap != nullptr) {
5662 create_new_uncommon_trap(uncommon_trap);
5663 }
5664 assert(if_proj->in(0)->is_If(), "must be If");
5665 if_proj = if_proj->in(0)->in(0);
5666 }
5667 assert(if_proj->is_Proj() && if_proj->in(0)->is_Initialize(),
5668 "must have reached control projection of init node");
5669 }
5670
5671 void LibraryCallKit::create_new_uncommon_trap(CallStaticJavaNode* uncommon_trap_call) {
5672 const int trap_request = uncommon_trap_call->uncommon_trap_request();
5673 assert(trap_request != 0, "no valid UCT trap request");
5674 PreserveJVMState pjvms(this);
5675 set_control(uncommon_trap_call->in(0));
5676 uncommon_trap(Deoptimization::trap_request_reason(trap_request),
5677 Deoptimization::trap_request_action(trap_request));
5678 assert(stopped(), "Should be stopped");
5679 _gvn.hash_delete(uncommon_trap_call);
5680 uncommon_trap_call->set_req(0, top()); // not used anymore, kill it
5681 }
5682
5683 // Common checks for array sorting intrinsics arguments.
5684 // Returns `true` if checks passed.
5685 bool LibraryCallKit::check_array_sort_arguments(Node* elementType, Node* obj, BasicType& bt) {
5686 // check address of the class
5687 if (elementType == nullptr || elementType->is_top()) {
5688 return false; // dead path
5689 }
5690 const TypeInstPtr* elem_klass = gvn().type(elementType)->isa_instptr();
5691 if (elem_klass == nullptr) {
5692 return false; // dead path
5693 }
5694 // java_mirror_type() returns non-null for compile-time Class constants only
5695 ciType* elem_type = elem_klass->java_mirror_type();
5696 if (elem_type == nullptr) {
5697 return false;
5698 }
5699 bt = elem_type->basic_type();
5700 // Disable the intrinsic if the CPU does not support SIMD sort
5701 if (!Matcher::supports_simd_sort(bt)) {
5702 return false;
5703 }
5704 // check address of the array
5705 if (obj == nullptr || obj->is_top()) {
5706 return false; // dead path
5707 }
5708 const TypeAryPtr* obj_t = _gvn.type(obj)->isa_aryptr();
5709 if (obj_t == nullptr || obj_t->elem() == Type::BOTTOM) {
5710 return false; // failed input validation
5711 }
5712 return true;
5713 }
5714
5715 //------------------------------inline_array_partition-----------------------
5716 bool LibraryCallKit::inline_array_partition() {
5717 address stubAddr = StubRoutines::select_array_partition_function();
5718 if (stubAddr == nullptr) {
5719 return false; // Intrinsic's stub is not implemented on this platform
5720 }
5721 assert(callee()->signature()->size() == 9, "arrayPartition has 8 parameters (one long)");
5722
5723 // no receiver because it is a static method
5724 Node* elementType = argument(0);
5725 Node* obj = argument(1);
5726 Node* offset = argument(2); // long
5727 Node* fromIndex = argument(4);
5728 Node* toIndex = argument(5);
5729 Node* indexPivot1 = argument(6);
5730 Node* indexPivot2 = argument(7);
5731 // PartitionOperation: argument(8) is ignored
5732
5733 Node* pivotIndices = nullptr;
5734 BasicType bt = T_ILLEGAL;
5735
5736 if (!check_array_sort_arguments(elementType, obj, bt)) {
5737 return false;
5738 }
5739 null_check(obj);
5740 // If obj is dead, only null-path is taken.
5741 if (stopped()) {
5742 return true;
5743 }
5744 // Set the original stack and the reexecute bit for the interpreter to reexecute
5745 // the bytecode that invokes DualPivotQuicksort.partition() if deoptimization happens.
5746 { PreserveReexecuteState preexecs(this);
5747 jvms()->set_should_reexecute(true);
5748
5749 Node* obj_adr = make_unsafe_address(obj, offset);
5750
5751 // create the pivotIndices array of type int and size = 2
5752 Node* size = intcon(2);
5753 Node* klass_node = makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_INT)));
5754 pivotIndices = new_array(klass_node, size, 0); // no arguments to push
5755 AllocateArrayNode* alloc = tightly_coupled_allocation(pivotIndices);
5756 guarantee(alloc != nullptr, "created above");
5757 Node* pivotIndices_adr = basic_plus_adr(pivotIndices, arrayOopDesc::base_offset_in_bytes(T_INT));
5758
5759 // pass the basic type enum to the stub
5760 Node* elemType = intcon(bt);
5761
5762 // Call the stub
5763 const char *stubName = "array_partition_stub";
5764 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::array_partition_Type(),
5765 stubAddr, stubName, TypePtr::BOTTOM,
5766 obj_adr, elemType, fromIndex, toIndex, pivotIndices_adr,
5767 indexPivot1, indexPivot2);
5768
5769 } // original reexecute is set back here
5770
5771 if (!stopped()) {
5772 set_result(pivotIndices);
5773 }
5774
5775 return true;
5776 }
5777
5778
5779 //------------------------------inline_array_sort-----------------------
5780 bool LibraryCallKit::inline_array_sort() {
5781 address stubAddr = StubRoutines::select_arraysort_function();
5782 if (stubAddr == nullptr) {
5783 return false; // Intrinsic's stub is not implemented on this platform
5784 }
5785 assert(callee()->signature()->size() == 7, "arraySort has 6 parameters (one long)");
5786
5787 // no receiver because it is a static method
5788 Node* elementType = argument(0);
5789 Node* obj = argument(1);
5790 Node* offset = argument(2); // long
5791 Node* fromIndex = argument(4);
5792 Node* toIndex = argument(5);
5793 // SortOperation: argument(6) is ignored
5794
5795 BasicType bt = T_ILLEGAL;
5796
5797 if (!check_array_sort_arguments(elementType, obj, bt)) {
5798 return false;
5799 }
5800 null_check(obj);
5801 // If obj is dead, only null-path is taken.
5802 if (stopped()) {
5803 return true;
5804 }
5805 Node* obj_adr = make_unsafe_address(obj, offset);
5806
5807 // pass the basic type enum to the stub
5808 Node* elemType = intcon(bt);
5809
5810 // Call the stub.
5811 const char *stubName = "arraysort_stub";
5812 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::array_sort_Type(),
5813 stubAddr, stubName, TypePtr::BOTTOM,
5814 obj_adr, elemType, fromIndex, toIndex);
5815
5816 return true;
5817 }
5818
5819
5820 //------------------------------inline_arraycopy-----------------------
5821 // public static native void java.lang.System.arraycopy(Object src, int srcPos,
5822 // Object dest, int destPos,
5823 // int length);
5824 bool LibraryCallKit::inline_arraycopy() {
5825 // Get the arguments.
5826 Node* src = argument(0); // type: oop
5827 Node* src_offset = argument(1); // type: int
5828 Node* dest = argument(2); // type: oop
5829 Node* dest_offset = argument(3); // type: int
5830 Node* length = argument(4); // type: int
5831
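  // Node-index watermark: guard/cast nodes created below have indices at or
  // above this, which lets replaced_nodes().apply() in
  // arraycopy_move_allocation_here() limit which replacements it propagates.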
5832 uint new_idx = C->unique();
5833
5834 // Check for allocation before we add nodes that would confuse
5835 // tightly_coupled_allocation()
5836 AllocateArrayNode* alloc = tightly_coupled_allocation(dest);
5837
5838 int saved_reexecute_sp = -1;
5839 JVMState* saved_jvms_before_guards = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
5840 // See arraycopy_restore_alloc_state() comment
5841 // if alloc == null we don't have to worry about a tightly coupled allocation so we can emit all needed guards
5842 // if saved_jvms_before_guards is not null (then alloc is not null) then we can handle guards and a tightly coupled allocation
5843 // if saved_jvms_before_guards is null and alloc is not null, we can't emit any guards
5844 bool can_emit_guards = (alloc == nullptr || saved_jvms_before_guards != nullptr);
5845
5846 // The following tests must be performed
5847 // (1) src and dest are arrays.
5848 // (2) src and dest arrays must have elements of the same BasicType
5849 // (3) src and dest must not be null.
5850 // (4) src_offset must not be negative.
5851 // (5) dest_offset must not be negative.
5852 // (6) length must not be negative.
5853 // (7) src_offset + length must not exceed length of src.
5854 // (8) dest_offset + length must not exceed length of dest.
5855 // (9) each element of an oop array must be assignable
5856
5857 // (3) src and dest must not be null.
5858 // always do this here because we need the JVM state for uncommon traps
5859 Node* null_ctl = top();
5860 src = saved_jvms_before_guards != nullptr ? null_check_oop(src, &null_ctl, true, true) : null_check(src, T_ARRAY);
5861 assert(null_ctl->is_top(), "no null control here");
5862 dest = null_check(dest, T_ARRAY);
5863
5864 if (!can_emit_guards) {
5865 // if saved_jvms_before_guards is null and alloc is not null, we don't emit any
5866 // guards but the arraycopy node could still take advantage of a
    // tightly coupled allocation. tightly_coupled_allocation() is
5868 // called again to make sure it takes the null check above into
5869 // account: the null check is mandatory and if it caused an
5870 // uncommon trap to be emitted then the allocation can't be
5871 // considered tightly coupled in this context.
5872 alloc = tightly_coupled_allocation(dest);
5873 }
5874
5875 bool validated = false;
5876
5877 const Type* src_type = _gvn.type(src);
5878 const Type* dest_type = _gvn.type(dest);
5879 const TypeAryPtr* top_src = src_type->isa_aryptr();
5880 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
5881
5882 // Do we have the type of src?
5883 bool has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5884 // Do we have the type of dest?
5885 bool has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5886 // Is the type for src from speculation?
5887 bool src_spec = false;
5888 // Is the type for dest from speculation?
5889 bool dest_spec = false;
5890
5891 if ((!has_src || !has_dest) && can_emit_guards) {
5892 // We don't have sufficient type information, let's see if
5893 // speculative types can help. We need to have types for both src
5894 // and dest so that it pays off.
5895
5896 // Do we already have or could we have type information for src
5897 bool could_have_src = has_src;
5898 // Do we already have or could we have type information for dest
5899 bool could_have_dest = has_dest;
5900
5901 ciKlass* src_k = nullptr;
5902 if (!has_src) {
5903 src_k = src_type->speculative_type_not_null();
5904 if (src_k != nullptr && src_k->is_array_klass()) {
5905 could_have_src = true;
5906 }
5907 }
5908
5909 ciKlass* dest_k = nullptr;
5910 if (!has_dest) {
5911 dest_k = dest_type->speculative_type_not_null();
5912 if (dest_k != nullptr && dest_k->is_array_klass()) {
5913 could_have_dest = true;
5914 }
5915 }
5916
5917 if (could_have_src && could_have_dest) {
5918 // This is going to pay off so emit the required guards
5919 if (!has_src) {
5920 src = maybe_cast_profiled_obj(src, src_k, true);
5921 src_type = _gvn.type(src);
5922 top_src = src_type->isa_aryptr();
5923 has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5924 src_spec = true;
5925 }
5926 if (!has_dest) {
5927 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5928 dest_type = _gvn.type(dest);
5929 top_dest = dest_type->isa_aryptr();
5930 has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5931 dest_spec = true;
5932 }
5933 }
5934 }
5935
5936 if (has_src && has_dest && can_emit_guards) {
5937 BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5938 BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5939 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5940 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5941
5942 if (src_elem == dest_elem && src_elem == T_OBJECT) {
5943 // If both arrays are object arrays then having the exact types
5944 // for both will remove the need for a subtype check at runtime
5945 // before the call and may make it possible to pick a faster copy
5946 // routine (without a subtype check on every element)
5947 // Do we have the exact type of src?
5948 bool could_have_src = src_spec;
5949 // Do we have the exact type of dest?
5950 bool could_have_dest = dest_spec;
5951 ciKlass* src_k = nullptr;
5952 ciKlass* dest_k = nullptr;
5953 if (!src_spec) {
5954 src_k = src_type->speculative_type_not_null();
5955 if (src_k != nullptr && src_k->is_array_klass()) {
5956 could_have_src = true;
5957 }
5958 }
5959 if (!dest_spec) {
5960 dest_k = dest_type->speculative_type_not_null();
5961 if (dest_k != nullptr && dest_k->is_array_klass()) {
5962 could_have_dest = true;
5963 }
5964 }
5965 if (could_have_src && could_have_dest) {
5966 // If we can have both exact types, emit the missing guards
5967 if (could_have_src && !src_spec) {
5968 src = maybe_cast_profiled_obj(src, src_k, true);
5969 }
5970 if (could_have_dest && !dest_spec) {
5971 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5972 }
5973 }
5974 }
5975 }
5976
5977 ciMethod* trap_method = method();
5978 int trap_bci = bci();
5979 if (saved_jvms_before_guards != nullptr) {
5980 trap_method = alloc->jvms()->method();
5981 trap_bci = alloc->jvms()->bci();
5982 }
5983
5984 bool negative_length_guard_generated = false;
5985
5986 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5987 can_emit_guards &&
5988 !src->is_top() && !dest->is_top()) {
    // validate arguments: enables transformation of the ArrayCopyNode
5990 validated = true;
5991
5992 RegionNode* slow_region = new RegionNode(1);
5993 record_for_igvn(slow_region);
5994
5995 // (1) src and dest are arrays.
5996 generate_non_array_guard(load_object_klass(src), slow_region, &src);
5997 generate_non_array_guard(load_object_klass(dest), slow_region, &dest);
5998
5999 // (2) src and dest arrays must have elements of the same BasicType
6000 // done at macro expansion or at Ideal transformation time
6001
6002 // (4) src_offset must not be negative.
6003 generate_negative_guard(src_offset, slow_region);
6004
6005 // (5) dest_offset must not be negative.
6006 generate_negative_guard(dest_offset, slow_region);
6007
6008 // (7) src_offset + length must not exceed length of src.
6009 generate_limit_guard(src_offset, length,
6010 load_array_length(src),
6011 slow_region);
6012
6013 // (8) dest_offset + length must not exceed length of dest.
6014 generate_limit_guard(dest_offset, length,
6015 load_array_length(dest),
6016 slow_region);
6017
6018 // (6) length must not be negative.
6019 // This is also checked in generate_arraycopy() during macro expansion, but
6020 // we also have to check it here for the case where the ArrayCopyNode will
6021 // be eliminated by Escape Analysis.
6022 if (EliminateAllocations) {
6023 generate_negative_guard(length, slow_region);
6024 negative_length_guard_generated = true;
6025 }
6026
6027 // (9) each element of an oop array must be assignable
6028 Node* dest_klass = load_object_klass(dest);
6029 if (src != dest) {
6030 Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
6031
6032 if (not_subtype_ctrl != top()) {
6033 PreserveJVMState pjvms(this);
6034 set_control(not_subtype_ctrl);
6035 uncommon_trap(Deoptimization::Reason_intrinsic,
6036 Deoptimization::Action_make_not_entrant);
6037 assert(stopped(), "Should be stopped");
6038 }
6039 }
6040 {
6041 PreserveJVMState pjvms(this);
6042 set_control(_gvn.transform(slow_region));
6043 uncommon_trap(Deoptimization::Reason_intrinsic,
6044 Deoptimization::Action_make_not_entrant);
6045 assert(stopped(), "Should be stopped");
6046 }
6047
6048 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
6049 const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
6050 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
6051 arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
6052 }
6053
6054 if (stopped()) {
6055 return true;
6056 }
6057
6058 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6059 // Create LoadRange and LoadKlass nodes for use during macro expansion here
6060 // so the compiler has a chance to eliminate them: during macro expansion,
6061 // we have to set their control (CastPP nodes are eliminated).
6062 load_object_klass(src), load_object_klass(dest),
6063 load_array_length(src), load_array_length(dest));
6064
6065 ac->set_arraycopy(validated);
6066
6067 Node* n = _gvn.transform(ac);
6068 if (n == ac) {
6069 ac->connect_outputs(this);
6070 } else {
6071 assert(validated, "shouldn't transform if all arguments not validated");
6072 set_all_memory(n);
6073 }
6074 clear_upper_avx();
6075
6077 return true;
6078 }
6079
6080
6081 // Helper function which determines if an arraycopy immediately follows
6082 // an allocation, with no intervening tests or other escapes for the object.
6083 AllocateArrayNode*
6084 LibraryCallKit::tightly_coupled_allocation(Node* ptr) {
6085 if (stopped()) return nullptr; // no fast path
6086 if (!C->do_aliasing()) return nullptr; // no MergeMems around
6087
6088 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr);
6089 if (alloc == nullptr) return nullptr;
6090
6091 Node* rawmem = memory(Compile::AliasIdxRaw);
6092 // Is the allocation's memory state untouched?
6093 if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
6094 // Bail out if there have been raw-memory effects since the allocation.
6095 // (Example: There might have been a call or safepoint.)
6096 return nullptr;
6097 }
6098 rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
6099 if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
6100 return nullptr;
6101 }
6102
6103 // There must be no unexpected observers of this allocation.
6104 for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
6105 Node* obs = ptr->fast_out(i);
6106 if (obs != this->map()) {
6107 return nullptr;
6108 }
6109 }
6110
6111 // This arraycopy must unconditionally follow the allocation of the ptr.
6112 Node* alloc_ctl = ptr->in(0);
6113 Node* ctl = control();
6114 while (ctl != alloc_ctl) {
6115 // There may be guards which feed into the slow_region.
6116 // Any other control flow means that we might not get a chance
6117 // to finish initializing the allocated object.
6118 // Various low-level checks bottom out in uncommon traps. These
6119 // are considered safe since we've already checked above that
6120 // there is no unexpected observer of this allocation.
6121 if (get_uncommon_trap_from_success_proj(ctl) != nullptr) {
6122 assert(ctl->in(0)->is_If(), "must be If");
6123 ctl = ctl->in(0)->in(0);
6124 } else {
6125 return nullptr;
6126 }
6127 }
6128
6129 // If we get this far, we have an allocation which immediately
6130 // precedes the arraycopy, and we can take over zeroing the new object.
6131 // The arraycopy will finish the initialization, and provide
6132 // a new control state to which we will anchor the destination pointer.
6133
6134 return alloc;
6135 }
6136
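// Given the success projection of a guard, return the uncommon trap call
// hanging off the other projection of the same If, or null if there is none.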
6137 CallStaticJavaNode* LibraryCallKit::get_uncommon_trap_from_success_proj(Node* node) {
6138 if (node->is_IfProj()) {
6139 IfProjNode* other_proj = node->as_IfProj()->other_if_proj();
6140 for (DUIterator_Fast jmax, j = other_proj->fast_outs(jmax); j < jmax; j++) {
6141 Node* obs = other_proj->fast_out(j);
6142 if (obs->in(0) == other_proj && obs->is_CallStaticJava() &&
6143 (obs->as_CallStaticJava()->entry_point() == OptoRuntime::uncommon_trap_blob()->entry_point())) {
6144 return obs->as_CallStaticJava();
6145 }
6146 }
6147 }
6148 return nullptr;
6149 }
6150
6151 //-------------inline_encodeISOArray-----------------------------------
6152 // int sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
6153 // int java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
6154 // int java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
6155 // encode char[] to byte[] in ISO_8859_1 or ASCII
6156 bool LibraryCallKit::inline_encodeISOArray(bool ascii) {
6157 assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
  // no receiver since it is a static method
6159 Node *src = argument(0);
6160 Node *src_offset = argument(1);
6161 Node *dst = argument(2);
6162 Node *dst_offset = argument(3);
6163 Node *length = argument(4);
6164
6165 // Cast source & target arrays to not-null
6166 src = must_be_not_null(src, true);
6167 dst = must_be_not_null(dst, true);
6168 if (stopped()) {
6169 return true;
6170 }
6171
6172 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
6173 const TypeAryPtr* dst_type = dst->Value(&_gvn)->isa_aryptr();
6174 if (src_type == nullptr || src_type->elem() == Type::BOTTOM ||
6175 dst_type == nullptr || dst_type->elem() == Type::BOTTOM) {
6176 // failed array check
6177 return false;
6178 }
6179
6180 // Figure out the size and type of the elements we will be copying.
6181 BasicType src_elem = src_type->elem()->array_element_basic_type();
6182 BasicType dst_elem = dst_type->elem()->array_element_basic_type();
  if (!((src_elem == T_CHAR) || (src_elem == T_BYTE)) || dst_elem != T_BYTE) {
6184 return false;
6185 }
6186
6187 // Check source & target bounds
6188 generate_string_range_check(src, src_offset, length, src_elem == T_BYTE, true);
6189 generate_string_range_check(dst, dst_offset, length, false, true);
6190 if (stopped()) {
6191 return true;
6192 }
6193
6194 Node* src_start = array_element_address(src, src_offset, T_CHAR);
6195 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
6196 // 'src_start' points to src array + scaled offset
6197 // 'dst_start' points to dst array + scaled offset
6198
6199 const TypeAryPtr* mtype = TypeAryPtr::BYTES;
6200 Node* enc = new EncodeISOArrayNode(control(), memory(mtype), src_start, dst_start, length, ascii);
6201 enc = _gvn.transform(enc);
6202 Node* res_mem = _gvn.transform(new SCMemProjNode(enc));
6203 set_memory(res_mem, mtype);
6204 set_result(enc);
6205 clear_upper_avx();
6206
6207 return true;
6208 }
6209
6210 //-------------inline_multiplyToLen-----------------------------------
6211 bool LibraryCallKit::inline_multiplyToLen() {
6212 assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");
6213
6214 address stubAddr = StubRoutines::multiplyToLen();
6215 if (stubAddr == nullptr) {
6216 return false; // Intrinsic's stub is not implemented on this platform
6217 }
6218 const char* stubName = "multiplyToLen";
6219
6220 assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");
6221
6222 // no receiver because it is a static method
6223 Node* x = argument(0);
6224 Node* xlen = argument(1);
6225 Node* y = argument(2);
6226 Node* ylen = argument(3);
6227 Node* z = argument(4);
6228
6229 x = must_be_not_null(x, true);
6230 y = must_be_not_null(y, true);
6231
6232 const TypeAryPtr* x_type = x->Value(&_gvn)->isa_aryptr();
6233 const TypeAryPtr* y_type = y->Value(&_gvn)->isa_aryptr();
6234 if (x_type == nullptr || x_type->elem() == Type::BOTTOM ||
6235 y_type == nullptr || y_type->elem() == Type::BOTTOM) {
6236 // failed array check
6237 return false;
6238 }
6239
6240 BasicType x_elem = x_type->elem()->array_element_basic_type();
6241 BasicType y_elem = y_type->elem()->array_element_basic_type();
6242 if (x_elem != T_INT || y_elem != T_INT) {
6243 return false;
6244 }
6245
6246 Node* x_start = array_element_address(x, intcon(0), x_elem);
6247 Node* y_start = array_element_address(y, intcon(0), y_elem);
  // 'x_start' points to the first element of the x array
  // 'y_start' points to the first element of the y array
6250
6251 Node* z_start = array_element_address(z, intcon(0), T_INT);
6252
6253 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
6254 OptoRuntime::multiplyToLen_Type(),
6255 stubAddr, stubName, TypePtr::BOTTOM,
6256 x_start, xlen, y_start, ylen, z_start);
6257
6258 C->set_has_split_ifs(true); // Has chance for split-if optimization
6259 set_result(z);
6260 return true;
6261 }
6262
6263 //-------------inline_squareToLen------------------------------------
6264 bool LibraryCallKit::inline_squareToLen() {
6265 assert(UseSquareToLenIntrinsic, "not implemented on this platform");
6266
6267 address stubAddr = StubRoutines::squareToLen();
6268 if (stubAddr == nullptr) {
6269 return false; // Intrinsic's stub is not implemented on this platform
6270 }
6271 const char* stubName = "squareToLen";
6272
6273 assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");
6274
6275 Node* x = argument(0);
6276 Node* len = argument(1);
6277 Node* z = argument(2);
6278 Node* zlen = argument(3);
6279
6280 x = must_be_not_null(x, true);
6281 z = must_be_not_null(z, true);
6282
6283 const TypeAryPtr* x_type = x->Value(&_gvn)->isa_aryptr();
6284 const TypeAryPtr* z_type = z->Value(&_gvn)->isa_aryptr();
6285 if (x_type == nullptr || x_type->elem() == Type::BOTTOM ||
6286 z_type == nullptr || z_type->elem() == Type::BOTTOM) {
6287 // failed array check
6288 return false;
6289 }
6290
6291 BasicType x_elem = x_type->elem()->array_element_basic_type();
6292 BasicType z_elem = z_type->elem()->array_element_basic_type();
6293 if (x_elem != T_INT || z_elem != T_INT) {
6294 return false;
6295 }
6296
6298 Node* x_start = array_element_address(x, intcon(0), x_elem);
6299 Node* z_start = array_element_address(z, intcon(0), z_elem);
6300
6301 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
6302 OptoRuntime::squareToLen_Type(),
6303 stubAddr, stubName, TypePtr::BOTTOM,
6304 x_start, len, z_start, zlen);
6305
6306 set_result(z);
6307 return true;
6308 }
6309
6310 //-------------inline_mulAdd------------------------------------------
6311 bool LibraryCallKit::inline_mulAdd() {
6312 assert(UseMulAddIntrinsic, "not implemented on this platform");
6313
6314 address stubAddr = StubRoutines::mulAdd();
6315 if (stubAddr == nullptr) {
6316 return false; // Intrinsic's stub is not implemented on this platform
6317 }
6318 const char* stubName = "mulAdd";
6319
6320 assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");
6321
6322 Node* out = argument(0);
6323 Node* in = argument(1);
6324 Node* offset = argument(2);
6325 Node* len = argument(3);
6326 Node* k = argument(4);
6327
6328 in = must_be_not_null(in, true);
6329 out = must_be_not_null(out, true);
6330
6331 const TypeAryPtr* out_type = out->Value(&_gvn)->isa_aryptr();
6332 const TypeAryPtr* in_type = in->Value(&_gvn)->isa_aryptr();
6333 if (out_type == nullptr || out_type->elem() == Type::BOTTOM ||
6334 in_type == nullptr || in_type->elem() == Type::BOTTOM) {
6335 // failed array check
6336 return false;
6337 }
6338
6339 BasicType out_elem = out_type->elem()->array_element_basic_type();
6340 BasicType in_elem = in_type->elem()->array_element_basic_type();
6341 if (out_elem != T_INT || in_elem != T_INT) {
6342 return false;
6343 }
6344
6345 Node* outlen = load_array_length(out);
6346 Node* new_offset = _gvn.transform(new SubINode(outlen, offset));
6347 Node* out_start = array_element_address(out, intcon(0), out_elem);
6348 Node* in_start = array_element_address(in, intcon(0), in_elem);
6349
6350 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
6351 OptoRuntime::mulAdd_Type(),
6352 stubAddr, stubName, TypePtr::BOTTOM,
                                   out_start, in_start, new_offset, len, k);
6354 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6355 set_result(result);
6356 return true;
6357 }
6358
6359 //-------------inline_montgomeryMultiply-----------------------------------
6360 bool LibraryCallKit::inline_montgomeryMultiply() {
6361 address stubAddr = StubRoutines::montgomeryMultiply();
6362 if (stubAddr == nullptr) {
6363 return false; // Intrinsic's stub is not implemented on this platform
6364 }
6365
6366 assert(UseMontgomeryMultiplyIntrinsic, "not implemented on this platform");
6367 const char* stubName = "montgomery_multiply";
6368
  assert(callee()->signature()->size() == 7, "montgomeryMultiply has 6 parameters (one long)");
6370
6371 Node* a = argument(0);
6372 Node* b = argument(1);
6373 Node* n = argument(2);
6374 Node* len = argument(3);
6375 Node* inv = argument(4);
6376 Node* m = argument(6);
6377
6378 const TypeAryPtr* a_type = a->Value(&_gvn)->isa_aryptr();
6379 const TypeAryPtr* b_type = b->Value(&_gvn)->isa_aryptr();
6380 const TypeAryPtr* n_type = n->Value(&_gvn)->isa_aryptr();
6381 const TypeAryPtr* m_type = m->Value(&_gvn)->isa_aryptr();
6382 if (a_type == nullptr || a_type->elem() == Type::BOTTOM ||
6383 b_type == nullptr || b_type->elem() == Type::BOTTOM ||
6384 n_type == nullptr || n_type->elem() == Type::BOTTOM ||
6385 m_type == nullptr || m_type->elem() == Type::BOTTOM) {
6386 // failed array check
6387 return false;
6388 }
6389
6390 BasicType a_elem = a_type->elem()->array_element_basic_type();
6391 BasicType b_elem = b_type->elem()->array_element_basic_type();
6392 BasicType n_elem = n_type->elem()->array_element_basic_type();
6393 BasicType m_elem = m_type->elem()->array_element_basic_type();
6394 if (a_elem != T_INT || b_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
6395 return false;
6396 }
6397
6398 // Make the call
6399 {
6400 Node* a_start = array_element_address(a, intcon(0), a_elem);
6401 Node* b_start = array_element_address(b, intcon(0), b_elem);
6402 Node* n_start = array_element_address(n, intcon(0), n_elem);
6403 Node* m_start = array_element_address(m, intcon(0), m_elem);
6404
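    // 'inv' is a long and therefore occupies two argument slots; top()
    // fills the unused second half.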
6405 Node* call = make_runtime_call(RC_LEAF,
6406 OptoRuntime::montgomeryMultiply_Type(),
6407 stubAddr, stubName, TypePtr::BOTTOM,
6408 a_start, b_start, n_start, len, inv, top(),
6409 m_start);
6410 set_result(m);
6411 }
6412
6413 return true;
6414 }
6415
6416 bool LibraryCallKit::inline_montgomerySquare() {
6417 address stubAddr = StubRoutines::montgomerySquare();
6418 if (stubAddr == nullptr) {
6419 return false; // Intrinsic's stub is not implemented on this platform
6420 }
6421
6422 assert(UseMontgomerySquareIntrinsic, "not implemented on this platform");
6423 const char* stubName = "montgomery_square";
6424
  assert(callee()->signature()->size() == 6, "montgomerySquare has 5 parameters, one of which is long");
6426
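  // Java-side signature (see java.math.BigInteger):
  //   implMontgomerySquare(int[] a, int[] n, int len, long inv, int[] product)
  // 'inv' occupies slots 3 and 4, so the product array is fetched from slot 5.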
6427 Node* a = argument(0);
6428 Node* n = argument(1);
6429 Node* len = argument(2);
6430 Node* inv = argument(3);
6431 Node* m = argument(5);
6432
6433 const TypeAryPtr* a_type = a->Value(&_gvn)->isa_aryptr();
6434 const TypeAryPtr* n_type = n->Value(&_gvn)->isa_aryptr();
6435 const TypeAryPtr* m_type = m->Value(&_gvn)->isa_aryptr();
6436 if (a_type == nullptr || a_type->elem() == Type::BOTTOM ||
6437 n_type == nullptr || n_type->elem() == Type::BOTTOM ||
6438 m_type == nullptr || m_type->elem() == Type::BOTTOM) {
6439 // failed array check
6440 return false;
6441 }
6442
6443 BasicType a_elem = a_type->elem()->array_element_basic_type();
6444 BasicType n_elem = n_type->elem()->array_element_basic_type();
6445 BasicType m_elem = m_type->elem()->array_element_basic_type();
6446 if (a_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
6447 return false;
6448 }
6449
6450 // Make the call
6451 {
6452 Node* a_start = array_element_address(a, intcon(0), a_elem);
6453 Node* n_start = array_element_address(n, intcon(0), n_elem);
6454 Node* m_start = array_element_address(m, intcon(0), m_elem);
6455
6456 Node* call = make_runtime_call(RC_LEAF,
6457 OptoRuntime::montgomerySquare_Type(),
6458 stubAddr, stubName, TypePtr::BOTTOM,
6459 a_start, n_start, len, inv, top(),
6460 m_start);
6461 set_result(m);
6462 }
6463
6464 return true;
6465 }
6466
6467 bool LibraryCallKit::inline_bigIntegerShift(bool isRightShift) {
6468 address stubAddr = nullptr;
6469 const char* stubName = nullptr;
6470
  stubAddr = isRightShift ? StubRoutines::bigIntegerRightShift() : StubRoutines::bigIntegerLeftShift();
6472 if (stubAddr == nullptr) {
6473 return false; // Intrinsic's stub is not implemented on this platform
6474 }
6475
  stubName = isRightShift ? "bigIntegerRightShiftWorker" : "bigIntegerLeftShiftWorker";
6477
6478 assert(callee()->signature()->size() == 5, "expected 5 arguments");
6479
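  // The Java-side workers are BigInteger.shiftLeftImplWorker / shiftRightImplWorker(int[] newArr, int[] oldArr, int newIdx, int shiftCount, int numIter).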
6480 Node* newArr = argument(0);
6481 Node* oldArr = argument(1);
6482 Node* newIdx = argument(2);
6483 Node* shiftCount = argument(3);
6484 Node* numIter = argument(4);
6485
6486 const TypeAryPtr* newArr_type = newArr->Value(&_gvn)->isa_aryptr();
6487 const TypeAryPtr* oldArr_type = oldArr->Value(&_gvn)->isa_aryptr();
6488 if (newArr_type == nullptr || newArr_type->elem() == Type::BOTTOM ||
6489 oldArr_type == nullptr || oldArr_type->elem() == Type::BOTTOM) {
6490 return false;
6491 }
6492
6493 BasicType newArr_elem = newArr_type->elem()->array_element_basic_type();
6494 BasicType oldArr_elem = oldArr_type->elem()->array_element_basic_type();
6495 if (newArr_elem != T_INT || oldArr_elem != T_INT) {
6496 return false;
6497 }
6498
6499 // Make the call
6500 {
6501 Node* newArr_start = array_element_address(newArr, intcon(0), newArr_elem);
6502 Node* oldArr_start = array_element_address(oldArr, intcon(0), oldArr_elem);
6503
6504 Node* call = make_runtime_call(RC_LEAF,
6505 OptoRuntime::bigIntegerShift_Type(),
6506 stubAddr,
6507 stubName,
6508 TypePtr::BOTTOM,
6509 newArr_start,
6510 oldArr_start,
6511 newIdx,
6512 shiftCount,
6513 numIter);
6514 }
6515
6516 return true;
6517 }
6518
6519 //-------------inline_vectorizedMismatch------------------------------
6520 bool LibraryCallKit::inline_vectorizedMismatch() {
6521 assert(UseVectorizedMismatchIntrinsic, "not implemented on this platform");
6522
  assert(callee()->signature()->size() == 8, "vectorizedMismatch has 6 parameters, two of which are long");
6524 Node* obja = argument(0); // Object
6525 Node* aoffset = argument(1); // long
6526 Node* objb = argument(3); // Object
6527 Node* boffset = argument(4); // long
6528 Node* length = argument(6); // int
6529 Node* scale = argument(7); // int
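  // 'scale' is log2 of the element size in bytes; the switch below maps it to a BasicType.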
6530
6531 const TypeAryPtr* obja_t = _gvn.type(obja)->isa_aryptr();
6532 const TypeAryPtr* objb_t = _gvn.type(objb)->isa_aryptr();
6533 if (obja_t == nullptr || obja_t->elem() == Type::BOTTOM ||
6534 objb_t == nullptr || objb_t->elem() == Type::BOTTOM ||
6535 scale == top()) {
6536 return false; // failed input validation
6537 }
6538
6539 Node* obja_adr = make_unsafe_address(obja, aoffset);
6540 Node* objb_adr = make_unsafe_address(objb, boffset);
6541
6542 // Partial inlining handling for inputs smaller than ArrayOperationPartialInlineSize bytes in size.
6543 //
6544 // inline_limit = ArrayOperationPartialInlineSize / element_size;
6545 // if (length <= inline_limit) {
6546 // inline_path:
6547 // vmask = VectorMaskGen length
6548 // vload1 = LoadVectorMasked obja, vmask
6549 // vload2 = LoadVectorMasked objb, vmask
6550 // result1 = VectorCmpMasked vload1, vload2, vmask
6551 // } else {
6552 // call_stub_path:
6553 // result2 = call vectorizedMismatch_stub(obja, objb, length, scale)
6554 // }
6555 // exit_block:
6556 // return Phi(result1, result2);
6557 //
6558 enum { inline_path = 1, // input is small enough to process it all at once
6559 stub_path = 2, // input is too large; call into the VM
6560 PATH_LIMIT = 3
6561 };
6562
6563 Node* exit_block = new RegionNode(PATH_LIMIT);
6564 Node* result_phi = new PhiNode(exit_block, TypeInt::INT);
6565 Node* memory_phi = new PhiNode(exit_block, Type::MEMORY, TypePtr::BOTTOM);
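  // The inline path and the stub path leave different memory states, so memory is merged with a Phi as well.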
6566
6567 Node* call_stub_path = control();
6568
6569 BasicType elem_bt = T_ILLEGAL;
6570
6571 const TypeInt* scale_t = _gvn.type(scale)->is_int();
6572 if (scale_t->is_con()) {
6573 switch (scale_t->get_con()) {
6574 case 0: elem_bt = T_BYTE; break;
6575 case 1: elem_bt = T_SHORT; break;
6576 case 2: elem_bt = T_INT; break;
6577 case 3: elem_bt = T_LONG; break;
6578
6579 default: elem_bt = T_ILLEGAL; break; // not supported
6580 }
6581 }
6582
6583 int inline_limit = 0;
6584 bool do_partial_inline = false;
6585
6586 if (elem_bt != T_ILLEGAL && ArrayOperationPartialInlineSize > 0) {
6587 inline_limit = ArrayOperationPartialInlineSize / type2aelembytes(elem_bt);
6588 do_partial_inline = inline_limit >= 16;
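    // Heuristic: only take the inline path when a masked vector can cover at least 16 elements.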
6589 }
6590
6591 if (do_partial_inline) {
6592 assert(elem_bt != T_ILLEGAL, "sanity");
6593
6594 if (Matcher::match_rule_supported_vector(Op_VectorMaskGen, inline_limit, elem_bt) &&
6595 Matcher::match_rule_supported_vector(Op_LoadVectorMasked, inline_limit, elem_bt) &&
6596 Matcher::match_rule_supported_vector(Op_VectorCmpMasked, inline_limit, elem_bt)) {
6597
6598 const TypeVect* vt = TypeVect::make(elem_bt, inline_limit);
6599 Node* cmp_length = _gvn.transform(new CmpINode(length, intcon(inline_limit)));
6600 Node* bol_gt = _gvn.transform(new BoolNode(cmp_length, BoolTest::gt));
6601
6602 call_stub_path = generate_guard(bol_gt, nullptr, PROB_MIN);
6603
6604 if (!stopped()) {
6605 Node* casted_length = _gvn.transform(new CastIINode(control(), length, TypeInt::make(0, inline_limit, Type::WidenMin)));
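      // On this path length <= inline_limit is guaranteed by the guard above; the CastII pins that range for the mask generation.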
6606
6607 const TypePtr* obja_adr_t = _gvn.type(obja_adr)->isa_ptr();
6608 const TypePtr* objb_adr_t = _gvn.type(objb_adr)->isa_ptr();
6609 Node* obja_adr_mem = memory(C->get_alias_index(obja_adr_t));
6610 Node* objb_adr_mem = memory(C->get_alias_index(objb_adr_t));
6611
6612 Node* vmask = _gvn.transform(VectorMaskGenNode::make(ConvI2X(casted_length), elem_bt));
6613 Node* vload_obja = _gvn.transform(new LoadVectorMaskedNode(control(), obja_adr_mem, obja_adr, obja_adr_t, vt, vmask));
6614 Node* vload_objb = _gvn.transform(new LoadVectorMaskedNode(control(), objb_adr_mem, objb_adr, objb_adr_t, vt, vmask));
6615 Node* result = _gvn.transform(new VectorCmpMaskedNode(vload_obja, vload_objb, vmask, TypeInt::INT));
6616
6617 exit_block->init_req(inline_path, control());
6618 memory_phi->init_req(inline_path, map()->memory());
6619 result_phi->init_req(inline_path, result);
6620
6621 C->set_max_vector_size(MAX2((uint)ArrayOperationPartialInlineSize, C->max_vector_size()));
6622 clear_upper_avx();
6623 }
6624 }
6625 }
6626
6627 if (call_stub_path != nullptr) {
6628 set_control(call_stub_path);
6629
6630 Node* call = make_runtime_call(RC_LEAF,
6631 OptoRuntime::vectorizedMismatch_Type(),
6632 StubRoutines::vectorizedMismatch(), "vectorizedMismatch", TypePtr::BOTTOM,
6633 obja_adr, objb_adr, length, scale);
6634
6635 exit_block->init_req(stub_path, control());
6636 memory_phi->init_req(stub_path, map()->memory());
6637 result_phi->init_req(stub_path, _gvn.transform(new ProjNode(call, TypeFunc::Parms)));
6638 }
6639
6640 exit_block = _gvn.transform(exit_block);
6641 memory_phi = _gvn.transform(memory_phi);
6642 result_phi = _gvn.transform(result_phi);
6643
6644 record_for_igvn(exit_block);
6645 record_for_igvn(memory_phi);
6646 record_for_igvn(result_phi);
6647
6648 set_control(exit_block);
6649 set_all_memory(memory_phi);
6650 set_result(result_phi);
6651
6652 return true;
6653 }
6654
6655 //------------------------------inline_vectorizedHashcode----------------------------
6656 bool LibraryCallKit::inline_vectorizedHashCode() {
6657 assert(UseVectorizedHashCodeIntrinsic, "not implemented on this platform");
6658
6659 assert(callee()->signature()->size() == 5, "vectorizedHashCode has 5 parameters");
6660 Node* array = argument(0);
6661 Node* offset = argument(1);
6662 Node* length = argument(2);
6663 Node* initialValue = argument(3);
6664 Node* basic_type = argument(4);
6665
6666 if (basic_type == top()) {
6667 return false; // failed input validation
6668 }
6669
6670 const TypeInt* basic_type_t = _gvn.type(basic_type)->is_int();
6671 if (!basic_type_t->is_con()) {
6672 return false; // Only intrinsify if mode argument is constant
6673 }
6674
6675 array = must_be_not_null(array, true);
6676
6677 BasicType bt = (BasicType)basic_type_t->get_con();
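  // The constant is a BasicType value (e.g. T_BYTE, T_CHAR, T_INT) chosen by the Java caller.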
6678
6679 // Resolve address of first element
6680 Node* array_start = array_element_address(array, offset, bt);
6681
6682 set_result(_gvn.transform(new VectorizedHashCodeNode(control(), memory(TypeAryPtr::get_array_body_type(bt)),
6683 array_start, length, initialValue, basic_type)));
6684 clear_upper_avx();
6685
6686 return true;
6687 }
6688
6689 /**
6690 * Calculate CRC32 for byte.
6691 * int java.util.zip.CRC32.update(int crc, int b)
6692 */
6693 bool LibraryCallKit::inline_updateCRC32() {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
6695 assert(callee()->signature()->size() == 2, "update has 2 parameters");
6696 // no receiver since it is static method
6697 Node* crc = argument(0); // type: int
6698 Node* b = argument(1); // type: int
6699
6700 /*
6701 * int c = ~ crc;
6702 * b = timesXtoThe32[(b ^ c) & 0xFF];
6703 * b = b ^ (c >>> 8);
6704 * crc = ~b;
6705 */
6706
6707 Node* M1 = intcon(-1);
6708 crc = _gvn.transform(new XorINode(crc, M1));
6709 Node* result = _gvn.transform(new XorINode(crc, b));
6710 result = _gvn.transform(new AndINode(result, intcon(0xFF)));
6711
6712 Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
6713 Node* offset = _gvn.transform(new LShiftINode(result, intcon(0x2)));
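  // Shift left by 2 to scale the table index by sizeof(jint), giving a byte offset into the 256-entry table.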
6714 Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
6715 result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
6716
6717 crc = _gvn.transform(new URShiftINode(crc, intcon(8)));
6718 result = _gvn.transform(new XorINode(crc, result));
6719 result = _gvn.transform(new XorINode(result, M1));
6720 set_result(result);
6721 return true;
6722 }
6723
6724 /**
6725 * Calculate CRC32 for byte[] array.
6726 * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len)
6727 */
6728 bool LibraryCallKit::inline_updateBytesCRC32() {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
6730 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
6731 // no receiver since it is static method
6732 Node* crc = argument(0); // type: int
6733 Node* src = argument(1); // type: oop
6734 Node* offset = argument(2); // type: int
6735 Node* length = argument(3); // type: int
6736
6737 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
6738 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
6739 // failed array check
6740 return false;
6741 }
6742
6743 // Figure out the size and type of the elements we will be copying.
6744 BasicType src_elem = src_type->elem()->array_element_basic_type();
6745 if (src_elem != T_BYTE) {
6746 return false;
6747 }
6748
6749 // 'src_start' points to src array + scaled offset
6750 src = must_be_not_null(src, true);
6751 Node* src_start = array_element_address(src, offset, src_elem);
6752
6753 // We assume that range check is done by caller.
6754 // TODO: generate range check (offset+length < src.length) in debug VM.
6755
6756 // Call the stub.
6757 address stubAddr = StubRoutines::updateBytesCRC32();
6758 const char *stubName = "updateBytesCRC32";
6759
6760 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
6761 stubAddr, stubName, TypePtr::BOTTOM,
6762 crc, src_start, length);
6763 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6764 set_result(result);
6765 return true;
6766 }
6767
6768 /**
6769 * Calculate CRC32 for ByteBuffer.
6770 * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
6771 */
6772 bool LibraryCallKit::inline_updateByteBufferCRC32() {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
6774 assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
6775 // no receiver since it is static method
6776 Node* crc = argument(0); // type: int
6777 Node* src = argument(1); // type: long
6778 Node* offset = argument(3); // type: int
6779 Node* length = argument(4); // type: int
6780
6781 src = ConvL2X(src); // adjust Java long to machine word
6782 Node* base = _gvn.transform(new CastX2PNode(src));
6783 offset = ConvI2X(offset);
6784
6785 // 'src_start' points to src array + scaled offset
6786 Node* src_start = basic_plus_adr(top(), base, offset);
6787
6788 // Call the stub.
6789 address stubAddr = StubRoutines::updateBytesCRC32();
6790 const char *stubName = "updateBytesCRC32";
6791
6792 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
6793 stubAddr, stubName, TypePtr::BOTTOM,
6794 crc, src_start, length);
6795 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6796 set_result(result);
6797 return true;
6798 }
6799
6800 //------------------------------get_table_from_crc32c_class-----------------------
Node* LibraryCallKit::get_table_from_crc32c_class(ciInstanceKlass* crc32c_class) {
6802 Node* table = load_field_from_object(nullptr, "byteTable", "[I", /*decorators*/ IN_HEAP, /*is_static*/ true, crc32c_class);
  assert(table != nullptr, "wrong version of java.util.zip.CRC32C");
6804
6805 return table;
6806 }
6807
6808 //------------------------------inline_updateBytesCRC32C-----------------------
6809 //
6810 // Calculate CRC32C for byte[] array.
6811 // int java.util.zip.CRC32C.updateBytes(int crc, byte[] buf, int off, int end)
6812 //
6813 bool LibraryCallKit::inline_updateBytesCRC32C() {
6814 assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
6815 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
6816 assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
6817 // no receiver since it is a static method
6818 Node* crc = argument(0); // type: int
6819 Node* src = argument(1); // type: oop
6820 Node* offset = argument(2); // type: int
6821 Node* end = argument(3); // type: int
6822
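  // Unlike CRC32.updateBytes, this method receives an exclusive 'end' index, so derive the length first.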
6823 Node* length = _gvn.transform(new SubINode(end, offset));
6824
6825 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
6826 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
6827 // failed array check
6828 return false;
6829 }
6830
6831 // Figure out the size and type of the elements we will be copying.
6832 BasicType src_elem = src_type->elem()->array_element_basic_type();
6833 if (src_elem != T_BYTE) {
6834 return false;
6835 }
6836
6837 // 'src_start' points to src array + scaled offset
6838 src = must_be_not_null(src, true);
6839 Node* src_start = array_element_address(src, offset, src_elem);
6840
6841 // static final int[] byteTable in class CRC32C
6842 Node* table = get_table_from_crc32c_class(callee()->holder());
6843 table = must_be_not_null(table, true);
6844 Node* table_start = array_element_address(table, intcon(0), T_INT);
6845
6846 // We assume that range check is done by caller.
6847 // TODO: generate range check (offset+length < src.length) in debug VM.
6848
6849 // Call the stub.
6850 address stubAddr = StubRoutines::updateBytesCRC32C();
6851 const char *stubName = "updateBytesCRC32C";
6852
6853 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
6854 stubAddr, stubName, TypePtr::BOTTOM,
6855 crc, src_start, length, table_start);
6856 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6857 set_result(result);
6858 return true;
6859 }
6860
6861 //------------------------------inline_updateDirectByteBufferCRC32C-----------------------
6862 //
6863 // Calculate CRC32C for DirectByteBuffer.
6864 // int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
6865 //
6866 bool LibraryCallKit::inline_updateDirectByteBufferCRC32C() {
6867 assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
6868 assert(callee()->signature()->size() == 5, "updateDirectByteBuffer has 4 parameters and one is long");
6869 assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
6870 // no receiver since it is a static method
6871 Node* crc = argument(0); // type: int
6872 Node* src = argument(1); // type: long
6873 Node* offset = argument(3); // type: int
6874 Node* end = argument(4); // type: int
6875
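  // 'end' is exclusive here as well; compute the length before forming the raw address.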
6876 Node* length = _gvn.transform(new SubINode(end, offset));
6877
6878 src = ConvL2X(src); // adjust Java long to machine word
6879 Node* base = _gvn.transform(new CastX2PNode(src));
6880 offset = ConvI2X(offset);
6881
6882 // 'src_start' points to src array + scaled offset
6883 Node* src_start = basic_plus_adr(top(), base, offset);
6884
6885 // static final int[] byteTable in class CRC32C
6886 Node* table = get_table_from_crc32c_class(callee()->holder());
6887 table = must_be_not_null(table, true);
6888 Node* table_start = array_element_address(table, intcon(0), T_INT);
6889
6890 // Call the stub.
6891 address stubAddr = StubRoutines::updateBytesCRC32C();
6892 const char *stubName = "updateBytesCRC32C";
6893
6894 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
6895 stubAddr, stubName, TypePtr::BOTTOM,
6896 crc, src_start, length, table_start);
6897 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6898 set_result(result);
6899 return true;
6900 }
6901
6902 //------------------------------inline_updateBytesAdler32----------------------
6903 //
6904 // Calculate Adler32 checksum for byte[] array.
6905 // int java.util.zip.Adler32.updateBytes(int crc, byte[] buf, int off, int len)
6906 //
6907 bool LibraryCallKit::inline_updateBytesAdler32() {
  assert(UseAdler32Intrinsics, "need Adler32 intrinsics support"); // check if we actually need to check this flag or check a different one
6909 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
6910 assert(callee()->holder()->is_loaded(), "Adler32 class must be loaded");
6911 // no receiver since it is static method
6912 Node* crc = argument(0); // type: int
6913 Node* src = argument(1); // type: oop
6914 Node* offset = argument(2); // type: int
6915 Node* length = argument(3); // type: int
6916
6917 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
6918 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
6919 // failed array check
6920 return false;
6921 }
6922
6923 // Figure out the size and type of the elements we will be copying.
6924 BasicType src_elem = src_type->elem()->array_element_basic_type();
6925 if (src_elem != T_BYTE) {
6926 return false;
6927 }
6928
6929 // 'src_start' points to src array + scaled offset
6930 Node* src_start = array_element_address(src, offset, src_elem);
6931
6932 // We assume that range check is done by caller.
6933 // TODO: generate range check (offset+length < src.length) in debug VM.
6934
6935 // Call the stub.
6936 address stubAddr = StubRoutines::updateBytesAdler32();
6937 const char *stubName = "updateBytesAdler32";
6938
6939 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
6940 stubAddr, stubName, TypePtr::BOTTOM,
6941 crc, src_start, length);
6942 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6943 set_result(result);
6944 return true;
6945 }
6946
6947 //------------------------------inline_updateByteBufferAdler32---------------
6948 //
6949 // Calculate Adler32 checksum for DirectByteBuffer.
6950 // int java.util.zip.Adler32.updateByteBuffer(int crc, long buf, int off, int len)
6951 //
6952 bool LibraryCallKit::inline_updateByteBufferAdler32() {
  assert(UseAdler32Intrinsics, "need Adler32 intrinsics support"); // check if we actually need to check this flag or check a different one
6954 assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
6955 assert(callee()->holder()->is_loaded(), "Adler32 class must be loaded");
6956 // no receiver since it is static method
6957 Node* crc = argument(0); // type: int
6958 Node* src = argument(1); // type: long
6959 Node* offset = argument(3); // type: int
6960 Node* length = argument(4); // type: int
6961
6962 src = ConvL2X(src); // adjust Java long to machine word
6963 Node* base = _gvn.transform(new CastX2PNode(src));
6964 offset = ConvI2X(offset);
6965
6966 // 'src_start' points to src array + scaled offset
6967 Node* src_start = basic_plus_adr(top(), base, offset);
6968
6969 // Call the stub.
6970 address stubAddr = StubRoutines::updateBytesAdler32();
6971 const char *stubName = "updateBytesAdler32";
6972
6973 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
6974 stubAddr, stubName, TypePtr::BOTTOM,
6975 crc, src_start, length);
6976
6977 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6978 set_result(result);
6979 return true;
6980 }
6981
6982 //----------------------------inline_reference_get0----------------------------
6983 // public T java.lang.ref.Reference.get();
6984 bool LibraryCallKit::inline_reference_get0() {
6985 const int referent_offset = java_lang_ref_Reference::referent_offset();
6986
6987 // Get the argument:
6988 Node* reference_obj = null_check_receiver();
6989 if (stopped()) return true;
6990
6991 DecoratorSet decorators = IN_HEAP | ON_WEAK_OOP_REF;
6992 Node* result = load_field_from_object(reference_obj, "referent", "Ljava/lang/Object;",
6993 decorators, /*is_static*/ false, nullptr);
6994 if (result == nullptr) return false;
6995
6996 // Add memory barrier to prevent commoning reads from this field
6997 // across safepoint since GC can change its value.
6998 insert_mem_bar(Op_MemBarCPUOrder);
6999
7000 set_result(result);
7001 return true;
7002 }
7003
7004 //----------------------------inline_reference_refersTo0----------------------------
7005 // bool java.lang.ref.Reference.refersTo0();
7006 // bool java.lang.ref.PhantomReference.refersTo0();
7007 bool LibraryCallKit::inline_reference_refersTo0(bool is_phantom) {
7008 // Get arguments:
7009 Node* reference_obj = null_check_receiver();
7010 Node* other_obj = argument(1);
7011 if (stopped()) return true;
7012
7013 DecoratorSet decorators = IN_HEAP | AS_NO_KEEPALIVE;
7014 decorators |= (is_phantom ? ON_PHANTOM_OOP_REF : ON_WEAK_OOP_REF);
7015 Node* referent = load_field_from_object(reference_obj, "referent", "Ljava/lang/Object;",
7016 decorators, /*is_static*/ false, nullptr);
7017 if (referent == nullptr) return false;
7018
7019 // Add memory barrier to prevent commoning reads from this field
7020 // across safepoint since GC can change its value.
7021 insert_mem_bar(Op_MemBarCPUOrder);
7022
7023 Node* cmp = _gvn.transform(new CmpPNode(referent, other_obj));
7024 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
7025 IfNode* if_node = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
7026
7027 RegionNode* region = new RegionNode(3);
7028 PhiNode* phi = new PhiNode(region, TypeInt::BOOL);
7029
7030 Node* if_true = _gvn.transform(new IfTrueNode(if_node));
7031 region->init_req(1, if_true);
7032 phi->init_req(1, intcon(1));
7033
7034 Node* if_false = _gvn.transform(new IfFalseNode(if_node));
7035 region->init_req(2, if_false);
7036 phi->init_req(2, intcon(0));
7037
7038 set_control(_gvn.transform(region));
7039 record_for_igvn(region);
7040 set_result(_gvn.transform(phi));
7041 return true;
7042 }
7043
7044 //----------------------------inline_reference_clear0----------------------------
7045 // void java.lang.ref.Reference.clear0();
7046 // void java.lang.ref.PhantomReference.clear0();
7047 bool LibraryCallKit::inline_reference_clear0(bool is_phantom) {
7048 // This matches the implementation in JVM_ReferenceClear, see the comments there.
7049
7050 // Get arguments
7051 Node* reference_obj = null_check_receiver();
7052 if (stopped()) return true;
7053
7054 // Common access parameters
7055 DecoratorSet decorators = IN_HEAP | AS_NO_KEEPALIVE;
7056 decorators |= (is_phantom ? ON_PHANTOM_OOP_REF : ON_WEAK_OOP_REF);
7057 Node* referent_field_addr = basic_plus_adr(reference_obj, java_lang_ref_Reference::referent_offset());
7058 const TypePtr* referent_field_addr_type = _gvn.type(referent_field_addr)->isa_ptr();
7059 const Type* val_type = TypeOopPtr::make_from_klass(env()->Object_klass());
7060
7061 Node* referent = access_load_at(reference_obj,
7062 referent_field_addr,
7063 referent_field_addr_type,
7064 val_type,
7065 T_OBJECT,
7066 decorators);
7067
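  // Mirror the runtime path: perform the null store only while the referent is still non-null.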
7068 IdealKit ideal(this);
7069 #define __ ideal.
7070 __ if_then(referent, BoolTest::ne, null());
7071 sync_kit(ideal);
7072 access_store_at(reference_obj,
7073 referent_field_addr,
7074 referent_field_addr_type,
7075 null(),
7076 val_type,
7077 T_OBJECT,
7078 decorators);
7079 __ sync_kit(this);
7080 __ end_if();
7081 final_sync(ideal);
7082 #undef __
7083
7084 return true;
7085 }
7086
7087 Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString,
7088 DecoratorSet decorators, bool is_static,
7089 ciInstanceKlass* fromKls) {
7090 if (fromKls == nullptr) {
7091 const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
7092 assert(tinst != nullptr, "obj is null");
7093 assert(tinst->is_loaded(), "obj is not loaded");
7094 fromKls = tinst->instance_klass();
7095 } else {
7096 assert(is_static, "only for static field access");
7097 }
7098 ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
7099 ciSymbol::make(fieldTypeString),
7100 is_static);
7101
7102 assert(field != nullptr, "undefined field %s %s %s", fieldTypeString, fromKls->name()->as_utf8(), fieldName);
  if (field == nullptr) return nullptr;
7104
7105 if (is_static) {
7106 const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
7107 fromObj = makecon(tip);
7108 }
7109
7110 // Next code copied from Parse::do_get_xxx():
7111
7112 // Compute address and memory type.
7113 int offset = field->offset_in_bytes();
7114 bool is_vol = field->is_volatile();
7115 ciType* field_klass = field->type();
7116 assert(field_klass->is_loaded(), "should be loaded");
7117 const TypePtr* adr_type = C->alias_type(field)->adr_type();
7118 Node *adr = basic_plus_adr(fromObj, fromObj, offset);
7119 assert(C->get_alias_index(adr_type) == C->get_alias_index(_gvn.type(adr)->isa_ptr()),
7120 "slice of address and input slice don't match");
7121 BasicType bt = field->layout_type();
7122
7123 // Build the resultant type of the load
7124 const Type *type;
7125 if (bt == T_OBJECT) {
7126 type = TypeOopPtr::make_from_klass(field_klass->as_klass());
7127 } else {
7128 type = Type::get_const_basic_type(bt);
7129 }
7130
7131 if (is_vol) {
7132 decorators |= MO_SEQ_CST;
7133 }
7134
7135 return access_load_at(fromObj, adr, adr_type, type, bt, decorators);
7136 }
7137
Node* LibraryCallKit::field_address_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString,
                                                bool is_exact /* true */, bool is_static /* false */,
                                                ciInstanceKlass* fromKls /* nullptr */) {
7141 if (fromKls == nullptr) {
7142 const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
7143 assert(tinst != nullptr, "obj is null");
7144 assert(tinst->is_loaded(), "obj is not loaded");
7145 assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
7146 fromKls = tinst->instance_klass();
  } else {
7149 assert(is_static, "only for static field access");
7150 }
7151 ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
7152 ciSymbol::make(fieldTypeString),
7153 is_static);
7154
7155 assert(field != nullptr, "undefined field");
7156 assert(!field->is_volatile(), "not defined for volatile fields");
7157
7158 if (is_static) {
7159 const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
7160 fromObj = makecon(tip);
7161 }
7162
7163 // Next code copied from Parse::do_get_xxx():
7164
7165 // Compute address and memory type.
7166 int offset = field->offset_in_bytes();
7167 Node *adr = basic_plus_adr(fromObj, fromObj, offset);
7168
7169 return adr;
7170 }
7171
7172 //------------------------------inline_aescrypt_Block-----------------------
7173 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
7174 address stubAddr = nullptr;
7175 const char *stubName;
7176 bool is_decrypt = false;
7177 assert(UseAES, "need AES instruction support");
7178
  switch (id) {
7180 case vmIntrinsics::_aescrypt_encryptBlock:
7181 stubAddr = StubRoutines::aescrypt_encryptBlock();
7182 stubName = "aescrypt_encryptBlock";
7183 break;
7184 case vmIntrinsics::_aescrypt_decryptBlock:
7185 stubAddr = StubRoutines::aescrypt_decryptBlock();
7186 stubName = "aescrypt_decryptBlock";
7187 is_decrypt = true;
7188 break;
7189 default:
7190 break;
7191 }
7192 if (stubAddr == nullptr) return false;
7193
7194 Node* aescrypt_object = argument(0);
7195 Node* src = argument(1);
7196 Node* src_offset = argument(2);
7197 Node* dest = argument(3);
7198 Node* dest_offset = argument(4);
7199
7200 src = must_be_not_null(src, true);
7201 dest = must_be_not_null(dest, true);
7202
7203 // (1) src and dest are arrays.
7204 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
7205 const TypeAryPtr* dest_type = dest->Value(&_gvn)->isa_aryptr();
7206 assert( src_type != nullptr && src_type->elem() != Type::BOTTOM &&
7207 dest_type != nullptr && dest_type->elem() != Type::BOTTOM, "args are strange");
7208
7209 // for the quick and dirty code we will skip all the checks.
7210 // we are just trying to get the call to be generated.
7211 Node* src_start = src;
7212 Node* dest_start = dest;
7213 if (src_offset != nullptr || dest_offset != nullptr) {
7214 assert(src_offset != nullptr && dest_offset != nullptr, "");
7215 src_start = array_element_address(src, src_offset, T_BYTE);
7216 dest_start = array_element_address(dest, dest_offset, T_BYTE);
7217 }
7218
7219 // now need to get the start of its expanded key array
7220 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
7221 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, is_decrypt);
7222 if (k_start == nullptr) return false;
7223
7224 // Call the stub.
7225 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
7226 stubAddr, stubName, TypePtr::BOTTOM,
7227 src_start, dest_start, k_start);
7228
7229 return true;
7230 }
7231
7232 //------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
7233 bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
7234 address stubAddr = nullptr;
7235 const char *stubName = nullptr;
7236 bool is_decrypt = false;
7237 assert(UseAES, "need AES instruction support");
7238
  switch (id) {
7240 case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
7241 stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
7242 stubName = "cipherBlockChaining_encryptAESCrypt";
7243 break;
7244 case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
7245 stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
7246 stubName = "cipherBlockChaining_decryptAESCrypt";
7247 is_decrypt = true;
7248 break;
7249 default:
7250 break;
7251 }
7252 if (stubAddr == nullptr) return false;
7253
7254 Node* cipherBlockChaining_object = argument(0);
7255 Node* src = argument(1);
7256 Node* src_offset = argument(2);
7257 Node* len = argument(3);
7258 Node* dest = argument(4);
7259 Node* dest_offset = argument(5);
7260
7261 src = must_be_not_null(src, false);
7262 dest = must_be_not_null(dest, false);
7263
7264 // (1) src and dest are arrays.
7265 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
7266 const TypeAryPtr* dest_type = dest->Value(&_gvn)->isa_aryptr();
7267 assert( src_type != nullptr && src_type->elem() != Type::BOTTOM &&
7268 dest_type != nullptr && dest_type->elem() != Type::BOTTOM, "args are strange");
7269
7270 // checks are the responsibility of the caller
7271 Node* src_start = src;
7272 Node* dest_start = dest;
7273 if (src_offset != nullptr || dest_offset != nullptr) {
7274 assert(src_offset != nullptr && dest_offset != nullptr, "");
7275 src_start = array_element_address(src, src_offset, T_BYTE);
7276 dest_start = array_element_address(dest, dest_offset, T_BYTE);
7277 }
7278
7279 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
7280 // (because of the predicated logic executed earlier).
7281 // so we cast it here safely.
7282 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
7283
7284 Node* embeddedCipherObj = load_field_from_object(cipherBlockChaining_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
7285 if (embeddedCipherObj == nullptr) return false;
7286
7287 // cast it to what we know it will be at runtime
7288 const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
7289 assert(tinst != nullptr, "CBC obj is null");
7290 assert(tinst->is_loaded(), "CBC obj is not loaded");
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
7292 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
7293
7294 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
7295 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
7296 const TypeOopPtr* xtype = aklass->as_instance_type()->cast_to_ptr_type(TypePtr::NotNull);
7297 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
7298 aescrypt_object = _gvn.transform(aescrypt_object);
7299
7300 // we need to get the start of the aescrypt_object's expanded key array
7301 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, is_decrypt);
7302 if (k_start == nullptr) return false;
7303
7304 // similarly, get the start address of the r vector
7305 Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B");
7306 if (objRvec == nullptr) return false;
7307 Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
7308
7309 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
7310 Node* cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
7311 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
7312 stubAddr, stubName, TypePtr::BOTTOM,
7313 src_start, dest_start, k_start, r_start, len);
7314
7315 // return cipher length (int)
7316 Node* retvalue = _gvn.transform(new ProjNode(cbcCrypt, TypeFunc::Parms));
7317 set_result(retvalue);
7318 return true;
7319 }
7320
7321 //------------------------------inline_electronicCodeBook_AESCrypt-----------------------
7322 bool LibraryCallKit::inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id) {
7323 address stubAddr = nullptr;
7324 const char *stubName = nullptr;
7325 bool is_decrypt = false;
7326 assert(UseAES, "need AES instruction support");
7327
7328 switch (id) {
7329 case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
7330 stubAddr = StubRoutines::electronicCodeBook_encryptAESCrypt();
7331 stubName = "electronicCodeBook_encryptAESCrypt";
7332 break;
7333 case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
7334 stubAddr = StubRoutines::electronicCodeBook_decryptAESCrypt();
7335 stubName = "electronicCodeBook_decryptAESCrypt";
7336 is_decrypt = true;
7337 break;
7338 default:
7339 break;
7340 }
7341
7342 if (stubAddr == nullptr) return false;
7343
7344 Node* electronicCodeBook_object = argument(0);
7345 Node* src = argument(1);
7346 Node* src_offset = argument(2);
7347 Node* len = argument(3);
7348 Node* dest = argument(4);
7349 Node* dest_offset = argument(5);
7350
7351 // (1) src and dest are arrays.
7352 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
7353 const TypeAryPtr* dest_type = dest->Value(&_gvn)->isa_aryptr();
7354 assert( src_type != nullptr && src_type->elem() != Type::BOTTOM &&
7355 dest_type != nullptr && dest_type->elem() != Type::BOTTOM, "args are strange");
7356
7357 // checks are the responsibility of the caller
7358 Node* src_start = src;
7359 Node* dest_start = dest;
7360 if (src_offset != nullptr || dest_offset != nullptr) {
7361 assert(src_offset != nullptr && dest_offset != nullptr, "");
7362 src_start = array_element_address(src, src_offset, T_BYTE);
7363 dest_start = array_element_address(dest, dest_offset, T_BYTE);
7364 }
7365
7366 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
7367 // (because of the predicated logic executed earlier).
7368 // so we cast it here safely.
7369 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
7370
7371 Node* embeddedCipherObj = load_field_from_object(electronicCodeBook_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
7372 if (embeddedCipherObj == nullptr) return false;
7373
7374 // cast it to what we know it will be at runtime
7375 const TypeInstPtr* tinst = _gvn.type(electronicCodeBook_object)->isa_instptr();
7376 assert(tinst != nullptr, "ECB obj is null");
7377 assert(tinst->is_loaded(), "ECB obj is not loaded");
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
7379 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
7380
7381 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
7382 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
7383 const TypeOopPtr* xtype = aklass->as_instance_type()->cast_to_ptr_type(TypePtr::NotNull);
7384 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
7385 aescrypt_object = _gvn.transform(aescrypt_object);
7386
7387 // we need to get the start of the aescrypt_object's expanded key array
7388 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, is_decrypt);
7389 if (k_start == nullptr) return false;
7390
7391 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
7392 Node* ecbCrypt = make_runtime_call(RC_LEAF | RC_NO_FP,
7393 OptoRuntime::electronicCodeBook_aescrypt_Type(),
7394 stubAddr, stubName, TypePtr::BOTTOM,
7395 src_start, dest_start, k_start, len);
7396
7397 // return cipher length (int)
7398 Node* retvalue = _gvn.transform(new ProjNode(ecbCrypt, TypeFunc::Parms));
7399 set_result(retvalue);
7400 return true;
7401 }
7402
7403 //------------------------------inline_counterMode_AESCrypt-----------------------
7404 bool LibraryCallKit::inline_counterMode_AESCrypt(vmIntrinsics::ID id) {
7405 assert(UseAES, "need AES instruction support");
7406 if (!UseAESCTRIntrinsics) return false;
7407
7408 address stubAddr = nullptr;
7409 const char *stubName = nullptr;
7410 if (id == vmIntrinsics::_counterMode_AESCrypt) {
7411 stubAddr = StubRoutines::counterMode_AESCrypt();
7412 stubName = "counterMode_AESCrypt";
7413 }
7414 if (stubAddr == nullptr) return false;
7415
7416 Node* counterMode_object = argument(0);
7417 Node* src = argument(1);
7418 Node* src_offset = argument(2);
7419 Node* len = argument(3);
7420 Node* dest = argument(4);
7421 Node* dest_offset = argument(5);
7422
7423 // (1) src and dest are arrays.
7424 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
7425 const TypeAryPtr* dest_type = dest->Value(&_gvn)->isa_aryptr();
7426 assert( src_type != nullptr && src_type->elem() != Type::BOTTOM &&
7427 dest_type != nullptr && dest_type->elem() != Type::BOTTOM, "args are strange");
7428
7429 // checks are the responsibility of the caller
7430 Node* src_start = src;
7431 Node* dest_start = dest;
7432 if (src_offset != nullptr || dest_offset != nullptr) {
7433 assert(src_offset != nullptr && dest_offset != nullptr, "");
7434 src_start = array_element_address(src, src_offset, T_BYTE);
7435 dest_start = array_element_address(dest, dest_offset, T_BYTE);
7436 }
7437
7438 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
7439 // (because of the predicated logic executed earlier).
7440 // so we cast it here safely.
7441 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
7442 Node* embeddedCipherObj = load_field_from_object(counterMode_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
7443 if (embeddedCipherObj == nullptr) return false;
7444 // cast it to what we know it will be at runtime
7445 const TypeInstPtr* tinst = _gvn.type(counterMode_object)->isa_instptr();
7446 assert(tinst != nullptr, "CTR obj is null");
7447 assert(tinst->is_loaded(), "CTR obj is not loaded");
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
7449 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
7450 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
7451 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
7452 const TypeOopPtr* xtype = aklass->as_instance_type()->cast_to_ptr_type(TypePtr::NotNull);
7453 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
7454 aescrypt_object = _gvn.transform(aescrypt_object);
7455 // we need to get the start of the aescrypt_object's expanded key array
7456 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, /* is_decrypt */ false);
7457 if (k_start == nullptr) return false;
7458 // similarly, get the start address of the r vector
7459 Node* obj_counter = load_field_from_object(counterMode_object, "counter", "[B");
7460 if (obj_counter == nullptr) return false;
7461 Node* cnt_start = array_element_address(obj_counter, intcon(0), T_BYTE);
7462
7463 Node* saved_encCounter = load_field_from_object(counterMode_object, "encryptedCounter", "[B");
7464 if (saved_encCounter == nullptr) return false;
7465 Node* saved_encCounter_start = array_element_address(saved_encCounter, intcon(0), T_BYTE);
7466 Node* used = field_address_from_object(counterMode_object, "used", "I", /*is_exact*/ false);
7467
7468 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
7469 Node* ctrCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
7470 OptoRuntime::counterMode_aescrypt_Type(),
7471 stubAddr, stubName, TypePtr::BOTTOM,
7472 src_start, dest_start, k_start, cnt_start, len, saved_encCounter_start, used);
7473
7474 // return cipher length (int)
7475 Node* retvalue = _gvn.transform(new ProjNode(ctrCrypt, TypeFunc::Parms));
7476 set_result(retvalue);
7477 return true;
7478 }
7479
7480 //------------------------------get_key_start_from_aescrypt_object-----------------------
7481 Node* LibraryCallKit::get_key_start_from_aescrypt_object(Node* aescrypt_object, bool is_decrypt) {
7482 // MixColumns for decryption can be reduced by preprocessing MixColumns with round keys.
7483 // Intel's extension is based on this optimization and AESCrypt generates round keys by preprocessing MixColumns.
7484 // However, ppc64 vncipher processes MixColumns and requires the same round keys with encryption.
7485 // The following platform specific stubs of encryption and decryption use the same round keys.
7486 #if defined(PPC64) || defined(S390) || defined(RISCV64)
7487 bool use_decryption_key = false;
7488 #else
7489 bool use_decryption_key = is_decrypt;
7490 #endif
7491 Node* objAESCryptKey = load_field_from_object(aescrypt_object, use_decryption_key ? "sessionKd" : "sessionKe", "[I");
  assert(objAESCryptKey != nullptr, "wrong version of com.sun.crypto.provider.AESCrypt");
  if (objAESCryptKey == nullptr) return nullptr;
7494
7495 // now have the array, need to get the start address of the selected key array
7496 Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
7497 return k_start;
7498 }
7499
7500 //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
7501 // Return node representing slow path of predicate check.
7502 // the pseudo code we want to emulate with this predicate is:
7503 // for encryption:
7504 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
7505 // for decryption:
7506 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
7507 // note cipher==plain is more conservative than the original java code but that's OK
7508 //
7509 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
7510 // The receiver was checked for null already.
7511 Node* objCBC = argument(0);
7512
7513 Node* src = argument(1);
7514 Node* dest = argument(4);
7515
7516 // Load embeddedCipher field of CipherBlockChaining object.
7517 Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
7518
7519 // get AESCrypt klass for instanceOf check
7520 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
7521 // will have same classloader as CipherBlockChaining object
7522 const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr();
7523 assert(tinst != nullptr, "CBCobj is null");
7524 assert(tinst->is_loaded(), "CBCobj is not loaded");
7525
7526 // we want to do an instanceof comparison against the AESCrypt class
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
7528 if (!klass_AESCrypt->is_loaded()) {
7529 // if AESCrypt is not even loaded, we never take the intrinsic fast path
7530 Node* ctrl = control();
7531 set_control(top()); // no regular fast path
7532 return ctrl;
7533 }
7534
7535 src = must_be_not_null(src, true);
7536 dest = must_be_not_null(dest, true);
7537
7538 // Resolve oops to stable for CmpP below.
7539 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
7540
7541 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
7542 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
7543 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
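  // instof != 1 means the embedded cipher is not an AESCrypt; that branch becomes the slow path returned below.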
7544
7545 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
7546
7547 // for encryption, we are done
7548 if (!decrypting)
7549 return instof_false; // even if it is null
7550
7551 // for decryption, we need to add a further check to avoid
7552 // taking the intrinsic path when cipher and plain are the same
7553 // see the original java code for why.
7554 RegionNode* region = new RegionNode(3);
7555 region->init_req(1, instof_false);
7556
7557 Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));
7558 Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq));
7559 Node* src_dest_conjoint = generate_guard(bool_src_dest, nullptr, PROB_MIN);
7560 region->init_req(2, src_dest_conjoint);
7561
7562 record_for_igvn(region);
7563 return _gvn.transform(region);
7564 }
7565
7566 //----------------------------inline_electronicCodeBook_AESCrypt_predicate----------------------------
7567 // Return node representing slow path of predicate check.
7568 // the pseudo code we want to emulate with this predicate is:
7569 // for encryption:
7570 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
7571 // for decryption:
7572 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
7573 // note cipher==plain is more conservative than the original java code but that's OK
7574 //
7575 Node* LibraryCallKit::inline_electronicCodeBook_AESCrypt_predicate(bool decrypting) {
7576 // The receiver was checked for null already.
7577 Node* objECB = argument(0);
7578
7579 // Load embeddedCipher field of ElectronicCodeBook object.
7580 Node* embeddedCipherObj = load_field_from_object(objECB, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
7581
7582 // get AESCrypt klass for instanceOf check
7583 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
7584 // will have same classloader as ElectronicCodeBook object
7585 const TypeInstPtr* tinst = _gvn.type(objECB)->isa_instptr();
7586 assert(tinst != nullptr, "ECBobj is null");
7587 assert(tinst->is_loaded(), "ECBobj is not loaded");
7588
7589 // we want to do an instanceof comparison against the AESCrypt class
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
7591 if (!klass_AESCrypt->is_loaded()) {
7592 // if AESCrypt is not even loaded, we never take the intrinsic fast path
7593 Node* ctrl = control();
7594 set_control(top()); // no regular fast path
7595 return ctrl;
7596 }
7597 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
7598
7599 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
7600 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
7601 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
7602
7603 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
7604
7605 // for encryption, we are done
7606 if (!decrypting)
7607 return instof_false; // even if it is null
7608
7609 // for decryption, we need to add a further check to avoid
7610 // taking the intrinsic path when cipher and plain are the same
7611 // see the original java code for why.
7612 RegionNode* region = new RegionNode(3);
7613 region->init_req(1, instof_false);
7614 Node* src = argument(1);
7615 Node* dest = argument(4);
7616 Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));
7617 Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq));
7618 Node* src_dest_conjoint = generate_guard(bool_src_dest, nullptr, PROB_MIN);
7619 region->init_req(2, src_dest_conjoint);
7620
7621 record_for_igvn(region);
7622 return _gvn.transform(region);
7623 }
7624
7625 //----------------------------inline_counterMode_AESCrypt_predicate----------------------------
7626 // Return node representing slow path of predicate check.
7627 // the pseudo code we want to emulate with this predicate is:
7628 // for encryption:
7629 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
7630 // for decryption:
7631 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
7632 // note cipher==plain is more conservative than the original java code but that's OK
7633 //
7634
7635 Node* LibraryCallKit::inline_counterMode_AESCrypt_predicate() {
7636 // The receiver was checked for null already.
7637 Node* objCTR = argument(0);
7638
7639 // Load embeddedCipher field of CipherBlockChaining object.
7640 Node* embeddedCipherObj = load_field_from_object(objCTR, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
7641
7642 // get AESCrypt klass for instanceOf check
7643 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
7644 // will have same classloader as CipherBlockChaining object
7645 const TypeInstPtr* tinst = _gvn.type(objCTR)->isa_instptr();
7646 assert(tinst != nullptr, "CTRobj is null");
7647 assert(tinst->is_loaded(), "CTRobj is not loaded");
7648
7649 // we want to do an instanceof comparison against the AESCrypt class
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
7651 if (!klass_AESCrypt->is_loaded()) {
7652 // if AESCrypt is not even loaded, we never take the intrinsic fast path
7653 Node* ctrl = control();
7654 set_control(top()); // no regular fast path
7655 return ctrl;
7656 }
7657
7658 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
7659 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
7660 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
7661 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
7662 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
7663
7664 return instof_false; // even if it is null
7665 }
7666
7667 //------------------------------inline_ghash_processBlocks
7668 bool LibraryCallKit::inline_ghash_processBlocks() {
7669 address stubAddr;
7670 const char *stubName;
7671 assert(UseGHASHIntrinsics, "need GHASH intrinsics support");
7672
7673 stubAddr = StubRoutines::ghash_processBlocks();
7674 stubName = "ghash_processBlocks";
7675
7676 Node* data = argument(0);
7677 Node* offset = argument(1);
7678 Node* len = argument(2);
7679 Node* state = argument(3);
7680 Node* subkeyH = argument(4);
7681
7682 state = must_be_not_null(state, true);
7683 subkeyH = must_be_not_null(subkeyH, true);
7684 data = must_be_not_null(data, true);
7685
7686 Node* state_start = array_element_address(state, intcon(0), T_LONG);
7687 assert(state_start, "state is null");
7688 Node* subkeyH_start = array_element_address(subkeyH, intcon(0), T_LONG);
7689 assert(subkeyH_start, "subkeyH is null");
7690 Node* data_start = array_element_address(data, offset, T_BYTE);
7691 assert(data_start, "data is null");
7692
7693 Node* ghash = make_runtime_call(RC_LEAF|RC_NO_FP,
7694 OptoRuntime::ghash_processBlocks_Type(),
7695 stubAddr, stubName, TypePtr::BOTTOM,
7696 state_start, subkeyH_start, data_start, len);
7697 return true;
7698 }
7699
7700 //------------------------------inline_chacha20Block
7701 bool LibraryCallKit::inline_chacha20Block() {
7702 address stubAddr;
7703 const char *stubName;
7704 assert(UseChaCha20Intrinsics, "need ChaCha20 intrinsics support");
7705
7706 stubAddr = StubRoutines::chacha20Block();
7707 stubName = "chacha20Block";
7708
7709 Node* state = argument(0);
7710 Node* result = argument(1);
7711
7712 state = must_be_not_null(state, true);
7713 result = must_be_not_null(result, true);
7714
7715 Node* state_start = array_element_address(state, intcon(0), T_INT);
7716 assert(state_start, "state is null");
7717 Node* result_start = array_element_address(result, intcon(0), T_BYTE);
7718 assert(result_start, "result is null");
7719
7720 Node* cc20Blk = make_runtime_call(RC_LEAF|RC_NO_FP,
7721 OptoRuntime::chacha20Block_Type(),
7722 stubAddr, stubName, TypePtr::BOTTOM,
7723 state_start, result_start);
7724 // return key stream length (int)
7725 Node* retvalue = _gvn.transform(new ProjNode(cc20Blk, TypeFunc::Parms));
7726 set_result(retvalue);
7727 return true;
7728 }
7729
7730 //------------------------------inline_kyberNtt
7731 bool LibraryCallKit::inline_kyberNtt() {
7732 address stubAddr;
7733 const char *stubName;
7734 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
7735 assert(callee()->signature()->size() == 2, "kyberNtt has 2 parameters");
7736
7737 stubAddr = StubRoutines::kyberNtt();
7738 stubName = "kyberNtt";
  if (stubAddr == nullptr) return false;
7740
7741 Node* coeffs = argument(0);
7742 Node* ntt_zetas = argument(1);
7743
7744 coeffs = must_be_not_null(coeffs, true);
7745 ntt_zetas = must_be_not_null(ntt_zetas, true);
7746
7747 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_SHORT);
7748 assert(coeffs_start, "coeffs is null");
7749 Node* ntt_zetas_start = array_element_address(ntt_zetas, intcon(0), T_SHORT);
7750 assert(ntt_zetas_start, "ntt_zetas is null");
7751 Node* kyberNtt = make_runtime_call(RC_LEAF|RC_NO_FP,
7752 OptoRuntime::kyberNtt_Type(),
7753 stubAddr, stubName, TypePtr::BOTTOM,
7754 coeffs_start, ntt_zetas_start);
7755 // return an int
7756 Node* retvalue = _gvn.transform(new ProjNode(kyberNtt, TypeFunc::Parms));
7757 set_result(retvalue);
7758 return true;
7759 }
7760
7761 //------------------------------inline_kyberInverseNtt
7762 bool LibraryCallKit::inline_kyberInverseNtt() {
7763 address stubAddr;
7764 const char *stubName;
7765 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
7766 assert(callee()->signature()->size() == 2, "kyberInverseNtt has 2 parameters");
7767
7768 stubAddr = StubRoutines::kyberInverseNtt();
7769 stubName = "kyberInverseNtt";
7770 if (!stubAddr) return false;
7771
7772 Node* coeffs = argument(0);
7773 Node* zetas = argument(1);
7774
7775 coeffs = must_be_not_null(coeffs, true);
7776 zetas = must_be_not_null(zetas, true);
7777
7778 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_SHORT);
7779 assert(coeffs_start, "coeffs is null");
7780 Node* zetas_start = array_element_address(zetas, intcon(0), T_SHORT);
7781 assert(zetas_start, "inverseNtt_zetas is null");
7782 Node* kyberInverseNtt = make_runtime_call(RC_LEAF|RC_NO_FP,
7783 OptoRuntime::kyberInverseNtt_Type(),
7784 stubAddr, stubName, TypePtr::BOTTOM,
7785 coeffs_start, zetas_start);
7786
7787 // return an int
7788 Node* retvalue = _gvn.transform(new ProjNode(kyberInverseNtt, TypeFunc::Parms));
7789 set_result(retvalue);
7790 return true;
7791 }
7792
7793 //------------------------------inline_kyberNttMult
7794 bool LibraryCallKit::inline_kyberNttMult() {
7795 address stubAddr;
7796 const char *stubName;
7797 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
7798 assert(callee()->signature()->size() == 4, "kyberNttMult has 4 parameters");
7799
7800 stubAddr = StubRoutines::kyberNttMult();
7801 stubName = "kyberNttMult";
7802 if (!stubAddr) return false;
7803
7804 Node* result = argument(0);
7805 Node* ntta = argument(1);
7806 Node* nttb = argument(2);
7807 Node* zetas = argument(3);
7808
7809 result = must_be_not_null(result, true);
7810 ntta = must_be_not_null(ntta, true);
7811 nttb = must_be_not_null(nttb, true);
7812 zetas = must_be_not_null(zetas, true);
7813
7814 Node* result_start = array_element_address(result, intcon(0), T_SHORT);
7815 assert(result_start, "result is null");
7816 Node* ntta_start = array_element_address(ntta, intcon(0), T_SHORT);
7817 assert(ntta_start, "ntta is null");
7818 Node* nttb_start = array_element_address(nttb, intcon(0), T_SHORT);
7819 assert(nttb_start, "nttb is null");
7820 Node* zetas_start = array_element_address(zetas, intcon(0), T_SHORT);
7821 assert(zetas_start, "nttMult_zetas is null");
7822 Node* kyberNttMult = make_runtime_call(RC_LEAF|RC_NO_FP,
7823 OptoRuntime::kyberNttMult_Type(),
7824 stubAddr, stubName, TypePtr::BOTTOM,
7825 result_start, ntta_start, nttb_start,
7826 zetas_start);
7827
7828 // return an int
7829 Node* retvalue = _gvn.transform(new ProjNode(kyberNttMult, TypeFunc::Parms));
7830 set_result(retvalue);
7831
7832 return true;
7833 }
7834
7835 //------------------------------inline_kyberAddPoly_2
7836 bool LibraryCallKit::inline_kyberAddPoly_2() {
7837 address stubAddr;
7838 const char *stubName;
7839 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
7840 assert(callee()->signature()->size() == 3, "kyberAddPoly_2 has 3 parameters");
7841
7842 stubAddr = StubRoutines::kyberAddPoly_2();
7843 stubName = "kyberAddPoly_2";
7844 if (!stubAddr) return false;
7845
7846 Node* result = argument(0);
7847 Node* a = argument(1);
7848 Node* b = argument(2);
7849
7850 result = must_be_not_null(result, true);
7851 a = must_be_not_null(a, true);
7852 b = must_be_not_null(b, true);
7853
7854 Node* result_start = array_element_address(result, intcon(0), T_SHORT);
7855 assert(result_start, "result is null");
7856 Node* a_start = array_element_address(a, intcon(0), T_SHORT);
7857 assert(a_start, "a is null");
7858 Node* b_start = array_element_address(b, intcon(0), T_SHORT);
7859 assert(b_start, "b is null");
7860 Node* kyberAddPoly_2 = make_runtime_call(RC_LEAF|RC_NO_FP,
7861 OptoRuntime::kyberAddPoly_2_Type(),
7862 stubAddr, stubName, TypePtr::BOTTOM,
7863 result_start, a_start, b_start);
7864 // return an int
7865 Node* retvalue = _gvn.transform(new ProjNode(kyberAddPoly_2, TypeFunc::Parms));
7866 set_result(retvalue);
7867 return true;
7868 }
7869
7870 //------------------------------inline_kyberAddPoly_3
7871 bool LibraryCallKit::inline_kyberAddPoly_3() {
7872 address stubAddr;
7873 const char *stubName;
7874 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
7875 assert(callee()->signature()->size() == 4, "kyberAddPoly_3 has 4 parameters");
7876
7877 stubAddr = StubRoutines::kyberAddPoly_3();
7878 stubName = "kyberAddPoly_3";
7879 if (!stubAddr) return false;
7880
7881 Node* result = argument(0);
7882 Node* a = argument(1);
7883 Node* b = argument(2);
7884 Node* c = argument(3);
7885
7886 result = must_be_not_null(result, true);
7887 a = must_be_not_null(a, true);
7888 b = must_be_not_null(b, true);
7889 c = must_be_not_null(c, true);
7890
7891 Node* result_start = array_element_address(result, intcon(0), T_SHORT);
7892 assert(result_start, "result is null");
7893 Node* a_start = array_element_address(a, intcon(0), T_SHORT);
7894 assert(a_start, "a is null");
7895 Node* b_start = array_element_address(b, intcon(0), T_SHORT);
7896 assert(b_start, "b is null");
7897 Node* c_start = array_element_address(c, intcon(0), T_SHORT);
7898 assert(c_start, "c is null");
7899 Node* kyberAddPoly_3 = make_runtime_call(RC_LEAF|RC_NO_FP,
7900 OptoRuntime::kyberAddPoly_3_Type(),
7901 stubAddr, stubName, TypePtr::BOTTOM,
7902 result_start, a_start, b_start, c_start);
7903 // return an int
7904 Node* retvalue = _gvn.transform(new ProjNode(kyberAddPoly_3, TypeFunc::Parms));
7905 set_result(retvalue);
7906 return true;
7907 }
7908
7909 //------------------------------inline_kyber12To16
7910 bool LibraryCallKit::inline_kyber12To16() {
7911 address stubAddr;
7912 const char *stubName;
7913 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
7914 assert(callee()->signature()->size() == 4, "kyber12To16 has 4 parameters");
7915
7916 stubAddr = StubRoutines::kyber12To16();
7917 stubName = "kyber12To16";
7918 if (!stubAddr) return false;
7919
7920 Node* condensed = argument(0);
7921 Node* condensedOffs = argument(1);
7922 Node* parsed = argument(2);
7923 Node* parsedLength = argument(3);
7924
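  // Only the two array arguments need null checks; condensedOffs and
  // parsedLength are ints and are passed to the stub unchanged.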
7925 condensed = must_be_not_null(condensed, true);
7926 parsed = must_be_not_null(parsed, true);
7927
7928 Node* condensed_start = array_element_address(condensed, intcon(0), T_BYTE);
7929 assert(condensed_start, "condensed is null");
7930 Node* parsed_start = array_element_address(parsed, intcon(0), T_SHORT);
7931 assert(parsed_start, "parsed is null");
7932 Node* kyber12To16 = make_runtime_call(RC_LEAF|RC_NO_FP,
7933 OptoRuntime::kyber12To16_Type(),
7934 stubAddr, stubName, TypePtr::BOTTOM,
7935 condensed_start, condensedOffs, parsed_start, parsedLength);
7936 // return an int
7937 Node* retvalue = _gvn.transform(new ProjNode(kyber12To16, TypeFunc::Parms));
7938 set_result(retvalue);
7939 return true;
7940
7941 }
7942
7943 //------------------------------inline_kyberBarrettReduce
7944 bool LibraryCallKit::inline_kyberBarrettReduce() {
7945 address stubAddr;
7946 const char *stubName;
7947 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
  assert(callee()->signature()->size() == 1, "kyberBarrettReduce has 1 parameter");
7949
7950 stubAddr = StubRoutines::kyberBarrettReduce();
7951 stubName = "kyberBarrettReduce";
7952 if (!stubAddr) return false;
7953
7954 Node* coeffs = argument(0);
7955
7956 coeffs = must_be_not_null(coeffs, true);
7957
7958 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_SHORT);
7959 assert(coeffs_start, "coeffs is null");
7960 Node* kyberBarrettReduce = make_runtime_call(RC_LEAF|RC_NO_FP,
7961 OptoRuntime::kyberBarrettReduce_Type(),
7962 stubAddr, stubName, TypePtr::BOTTOM,
7963 coeffs_start);
7964 // return an int
7965 Node* retvalue = _gvn.transform(new ProjNode(kyberBarrettReduce, TypeFunc::Parms));
7966 set_result(retvalue);
7967 return true;
7968 }
7969
7970 //------------------------------inline_dilithiumAlmostNtt
7971 bool LibraryCallKit::inline_dilithiumAlmostNtt() {
7972 address stubAddr;
7973 const char *stubName;
7974 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
7975 assert(callee()->signature()->size() == 2, "dilithiumAlmostNtt has 2 parameters");
7976
7977 stubAddr = StubRoutines::dilithiumAlmostNtt();
7978 stubName = "dilithiumAlmostNtt";
7979 if (!stubAddr) return false;
7980
7981 Node* coeffs = argument(0);
7982 Node* ntt_zetas = argument(1);
7983
7984 coeffs = must_be_not_null(coeffs, true);
7985 ntt_zetas = must_be_not_null(ntt_zetas, true);
7986
7987 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_INT);
7988 assert(coeffs_start, "coeffs is null");
7989 Node* ntt_zetas_start = array_element_address(ntt_zetas, intcon(0), T_INT);
7990 assert(ntt_zetas_start, "ntt_zetas is null");
7991 Node* dilithiumAlmostNtt = make_runtime_call(RC_LEAF|RC_NO_FP,
7992 OptoRuntime::dilithiumAlmostNtt_Type(),
7993 stubAddr, stubName, TypePtr::BOTTOM,
7994 coeffs_start, ntt_zetas_start);
7995 // return an int
7996 Node* retvalue = _gvn.transform(new ProjNode(dilithiumAlmostNtt, TypeFunc::Parms));
7997 set_result(retvalue);
7998 return true;
7999 }
8000
8001 //------------------------------inline_dilithiumAlmostInverseNtt
8002 bool LibraryCallKit::inline_dilithiumAlmostInverseNtt() {
8003 address stubAddr;
8004 const char *stubName;
8005 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
8006 assert(callee()->signature()->size() == 2, "dilithiumAlmostInverseNtt has 2 parameters");
8007
8008 stubAddr = StubRoutines::dilithiumAlmostInverseNtt();
8009 stubName = "dilithiumAlmostInverseNtt";
8010 if (!stubAddr) return false;
8011
8012 Node* coeffs = argument(0);
8013 Node* zetas = argument(1);
8014
8015 coeffs = must_be_not_null(coeffs, true);
8016 zetas = must_be_not_null(zetas, true);
8017
8018 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_INT);
8019 assert(coeffs_start, "coeffs is null");
8020 Node* zetas_start = array_element_address(zetas, intcon(0), T_INT);
8021 assert(zetas_start, "inverseNtt_zetas is null");
8022 Node* dilithiumAlmostInverseNtt = make_runtime_call(RC_LEAF|RC_NO_FP,
8023 OptoRuntime::dilithiumAlmostInverseNtt_Type(),
8024 stubAddr, stubName, TypePtr::BOTTOM,
8025 coeffs_start, zetas_start);
8026 // return an int
8027 Node* retvalue = _gvn.transform(new ProjNode(dilithiumAlmostInverseNtt, TypeFunc::Parms));
8028 set_result(retvalue);
8029 return true;
8030 }
8031
8032 //------------------------------inline_dilithiumNttMult
8033 bool LibraryCallKit::inline_dilithiumNttMult() {
8034 address stubAddr;
8035 const char *stubName;
8036 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
8037 assert(callee()->signature()->size() == 3, "dilithiumNttMult has 3 parameters");
8038
8039 stubAddr = StubRoutines::dilithiumNttMult();
8040 stubName = "dilithiumNttMult";
8041 if (!stubAddr) return false;
8042
  Node* result = argument(0);
  Node* ntta = argument(1);
  Node* nttb = argument(2);

  result = must_be_not_null(result, true);
  ntta = must_be_not_null(ntta, true);
  nttb = must_be_not_null(nttb, true);
8052
8053 Node* result_start = array_element_address(result, intcon(0), T_INT);
8054 assert(result_start, "result is null");
8055 Node* ntta_start = array_element_address(ntta, intcon(0), T_INT);
8056 assert(ntta_start, "ntta is null");
8057 Node* nttb_start = array_element_address(nttb, intcon(0), T_INT);
8058 assert(nttb_start, "nttb is null");
8059 Node* dilithiumNttMult = make_runtime_call(RC_LEAF|RC_NO_FP,
8060 OptoRuntime::dilithiumNttMult_Type(),
8061 stubAddr, stubName, TypePtr::BOTTOM,
8062 result_start, ntta_start, nttb_start);
8063
8064 // return an int
8065 Node* retvalue = _gvn.transform(new ProjNode(dilithiumNttMult, TypeFunc::Parms));
8066 set_result(retvalue);
8067
8068 return true;
8069 }
8070
8071 //------------------------------inline_dilithiumMontMulByConstant
8072 bool LibraryCallKit::inline_dilithiumMontMulByConstant() {
8073 address stubAddr;
8074 const char *stubName;
8075 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
8076 assert(callee()->signature()->size() == 2, "dilithiumMontMulByConstant has 2 parameters");
8077
8078 stubAddr = StubRoutines::dilithiumMontMulByConstant();
8079 stubName = "dilithiumMontMulByConstant";
8080 if (!stubAddr) return false;
8081
8082 Node* coeffs = argument(0);
8083 Node* constant = argument(1);
8084
8085 coeffs = must_be_not_null(coeffs, true);
8086
8087 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_INT);
8088 assert(coeffs_start, "coeffs is null");
8089 Node* dilithiumMontMulByConstant = make_runtime_call(RC_LEAF|RC_NO_FP,
8090 OptoRuntime::dilithiumMontMulByConstant_Type(),
8091 stubAddr, stubName, TypePtr::BOTTOM,
8092 coeffs_start, constant);
8093
8094 // return an int
8095 Node* retvalue = _gvn.transform(new ProjNode(dilithiumMontMulByConstant, TypeFunc::Parms));
8096 set_result(retvalue);
8097 return true;
8098 }
8099
8100
8101 //------------------------------inline_dilithiumDecomposePoly
8102 bool LibraryCallKit::inline_dilithiumDecomposePoly() {
8103 address stubAddr;
8104 const char *stubName;
8105 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
8106 assert(callee()->signature()->size() == 5, "dilithiumDecomposePoly has 5 parameters");
8107
8108 stubAddr = StubRoutines::dilithiumDecomposePoly();
8109 stubName = "dilithiumDecomposePoly";
8110 if (!stubAddr) return false;
8111
8112 Node* input = argument(0);
8113 Node* lowPart = argument(1);
8114 Node* highPart = argument(2);
8115 Node* twoGamma2 = argument(3);
8116 Node* multiplier = argument(4);
8117
8118 input = must_be_not_null(input, true);
8119 lowPart = must_be_not_null(lowPart, true);
8120 highPart = must_be_not_null(highPart, true);
8121
8122 Node* input_start = array_element_address(input, intcon(0), T_INT);
8123 assert(input_start, "input is null");
8124 Node* lowPart_start = array_element_address(lowPart, intcon(0), T_INT);
8125 assert(lowPart_start, "lowPart is null");
8126 Node* highPart_start = array_element_address(highPart, intcon(0), T_INT);
8127 assert(highPart_start, "highPart is null");
8128
8129 Node* dilithiumDecomposePoly = make_runtime_call(RC_LEAF|RC_NO_FP,
8130 OptoRuntime::dilithiumDecomposePoly_Type(),
8131 stubAddr, stubName, TypePtr::BOTTOM,
8132 input_start, lowPart_start, highPart_start,
8133 twoGamma2, multiplier);
8134
8135 // return an int
8136 Node* retvalue = _gvn.transform(new ProjNode(dilithiumDecomposePoly, TypeFunc::Parms));
8137 set_result(retvalue);
8138 return true;
8139 }
8140
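//------------------------------inline_base64_encodeBlock-----------------------
//
// Inline the Base64 block encoder. The receiver ('base64obj') is not needed
// by the stub; 'src'/'offset'/'len' describe the input bytes, 'dest'/'dp'
// the output buffer and starting position, and 'isURL' selects the URL-safe
// alphabet. The Java method returns void, so the call result is not
// projected.
//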
8141 bool LibraryCallKit::inline_base64_encodeBlock() {
8142 address stubAddr;
8143 const char *stubName;
8144 assert(UseBASE64Intrinsics, "need Base64 intrinsics support");
8145 assert(callee()->signature()->size() == 6, "base64_encodeBlock has 6 parameters");
8146 stubAddr = StubRoutines::base64_encodeBlock();
8147 stubName = "encodeBlock";
8148
8149 if (!stubAddr) return false;
8150 Node* base64obj = argument(0);
8151 Node* src = argument(1);
8152 Node* offset = argument(2);
8153 Node* len = argument(3);
8154 Node* dest = argument(4);
8155 Node* dp = argument(5);
8156 Node* isURL = argument(6);
8157
8158 src = must_be_not_null(src, true);
8159 dest = must_be_not_null(dest, true);
8160
8161 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
8162 assert(src_start, "source array is null");
8163 Node* dest_start = array_element_address(dest, intcon(0), T_BYTE);
8164 assert(dest_start, "destination array is null");
8165
8166 Node* base64 = make_runtime_call(RC_LEAF,
8167 OptoRuntime::base64_encodeBlock_Type(),
8168 stubAddr, stubName, TypePtr::BOTTOM,
8169 src_start, offset, len, dest_start, dp, isURL);
8170 return true;
8171 }
8172
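//------------------------------inline_base64_decodeBlock-----------------------
//
// Inline the Base64 block decoder. Mirrors encodeBlock above, with an
// additional 'isMIME' flag for lenient MIME decoding; the stub's int result
// (the number of bytes written to 'dest') is returned to Java.
//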
8173 bool LibraryCallKit::inline_base64_decodeBlock() {
8174 address stubAddr;
8175 const char *stubName;
8176 assert(UseBASE64Intrinsics, "need Base64 intrinsics support");
8177 assert(callee()->signature()->size() == 7, "base64_decodeBlock has 7 parameters");
8178 stubAddr = StubRoutines::base64_decodeBlock();
8179 stubName = "decodeBlock";
8180
8181 if (!stubAddr) return false;
8182 Node* base64obj = argument(0);
8183 Node* src = argument(1);
8184 Node* src_offset = argument(2);
8185 Node* len = argument(3);
8186 Node* dest = argument(4);
8187 Node* dest_offset = argument(5);
8188 Node* isURL = argument(6);
8189 Node* isMIME = argument(7);
8190
8191 src = must_be_not_null(src, true);
8192 dest = must_be_not_null(dest, true);
8193
8194 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
8195 assert(src_start, "source array is null");
8196 Node* dest_start = array_element_address(dest, intcon(0), T_BYTE);
8197 assert(dest_start, "destination array is null");
8198
8199 Node* call = make_runtime_call(RC_LEAF,
8200 OptoRuntime::base64_decodeBlock_Type(),
8201 stubAddr, stubName, TypePtr::BOTTOM,
8202 src_start, src_offset, len, dest_start, dest_offset, isURL, isMIME);
8203 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
8204 set_result(result);
8205 return true;
8206 }
8207
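//------------------------------inline_poly1305_processBlocks-----------------------
//
// Inline the Poly1305 block-processing loop. 'alimbs' and 'rlimbs' are
// long[] limb representations of the accumulator and the key value r; the
// stub consumes 'len' input bytes starting at 'input_offset' and updates
// the accumulator limbs in place.
//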
8208 bool LibraryCallKit::inline_poly1305_processBlocks() {
8209 address stubAddr;
8210 const char *stubName;
8211 assert(UsePoly1305Intrinsics, "need Poly intrinsics support");
8212 assert(callee()->signature()->size() == 5, "poly1305_processBlocks has %d parameters", callee()->signature()->size());
8213 stubAddr = StubRoutines::poly1305_processBlocks();
8214 stubName = "poly1305_processBlocks";
8215
8216 if (!stubAddr) return false;
8217 null_check_receiver(); // null-check receiver
8218 if (stopped()) return true;
8219
8220 Node* input = argument(1);
8221 Node* input_offset = argument(2);
8222 Node* len = argument(3);
8223 Node* alimbs = argument(4);
8224 Node* rlimbs = argument(5);
8225
8226 input = must_be_not_null(input, true);
8227 alimbs = must_be_not_null(alimbs, true);
8228 rlimbs = must_be_not_null(rlimbs, true);
8229
8230 Node* input_start = array_element_address(input, input_offset, T_BYTE);
8231 assert(input_start, "input array is null");
8232 Node* acc_start = array_element_address(alimbs, intcon(0), T_LONG);
8233 assert(acc_start, "acc array is null");
8234 Node* r_start = array_element_address(rlimbs, intcon(0), T_LONG);
8235 assert(r_start, "r array is null");
8236
8237 Node* call = make_runtime_call(RC_LEAF | RC_NO_FP,
8238 OptoRuntime::poly1305_processBlocks_Type(),
8239 stubAddr, stubName, TypePtr::BOTTOM,
8240 input_start, len, acc_start, r_start);
8241 return true;
8242 }
8243
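//------------------------------inline_intpoly_montgomeryMult_P256-----------------------
//
// Inline the Montgomery multiplication for the P-256 field: 'a', 'b' and
// 'r' are long[] limb arrays, and the stub computes r = a * b in the
// Montgomery domain of the P-256 prime.
//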
8244 bool LibraryCallKit::inline_intpoly_montgomeryMult_P256() {
8245 address stubAddr;
8246 const char *stubName;
8247 assert(UseIntPolyIntrinsics, "need intpoly intrinsics support");
8248 assert(callee()->signature()->size() == 3, "intpoly_montgomeryMult_P256 has %d parameters", callee()->signature()->size());
8249 stubAddr = StubRoutines::intpoly_montgomeryMult_P256();
8250 stubName = "intpoly_montgomeryMult_P256";
8251
8252 if (!stubAddr) return false;
8253 null_check_receiver(); // null-check receiver
8254 if (stopped()) return true;
8255
8256 Node* a = argument(1);
8257 Node* b = argument(2);
8258 Node* r = argument(3);
8259
8260 a = must_be_not_null(a, true);
8261 b = must_be_not_null(b, true);
8262 r = must_be_not_null(r, true);
8263
8264 Node* a_start = array_element_address(a, intcon(0), T_LONG);
8265 assert(a_start, "a array is null");
8266 Node* b_start = array_element_address(b, intcon(0), T_LONG);
8267 assert(b_start, "b array is null");
8268 Node* r_start = array_element_address(r, intcon(0), T_LONG);
8269 assert(r_start, "r array is null");
8270
8271 Node* call = make_runtime_call(RC_LEAF | RC_NO_FP,
8272 OptoRuntime::intpoly_montgomeryMult_P256_Type(),
8273 stubAddr, stubName, TypePtr::BOTTOM,
8274 a_start, b_start, r_start);
8275 return true;
8276 }
8277
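//------------------------------inline_intpoly_assign-----------------------
//
// Inline the conditional limb-array assignment used by the constant-time EC
// code: judging from its use, when 'set' is nonzero the stub copies 'b'
// into 'a' limb by limb without a data-dependent branch, with 'arr_length'
// bounding the copy.
//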
8278 bool LibraryCallKit::inline_intpoly_assign() {
8279 assert(UseIntPolyIntrinsics, "need intpoly intrinsics support");
8280 assert(callee()->signature()->size() == 3, "intpoly_assign has %d parameters", callee()->signature()->size());
8281 const char *stubName = "intpoly_assign";
8282 address stubAddr = StubRoutines::intpoly_assign();
8283 if (!stubAddr) return false;
8284
8285 Node* set = argument(0);
8286 Node* a = argument(1);
8287 Node* b = argument(2);
8288 Node* arr_length = load_array_length(a);
8289
8290 a = must_be_not_null(a, true);
8291 b = must_be_not_null(b, true);
8292
8293 Node* a_start = array_element_address(a, intcon(0), T_LONG);
8294 assert(a_start, "a array is null");
8295 Node* b_start = array_element_address(b, intcon(0), T_LONG);
8296 assert(b_start, "b array is null");
8297
8298 Node* call = make_runtime_call(RC_LEAF | RC_NO_FP,
8299 OptoRuntime::intpoly_assign_Type(),
8300 stubAddr, stubName, TypePtr::BOTTOM,
8301 set, a_start, b_start, arr_length);
8302 return true;
8303 }
8304
8305 //------------------------------inline_digestBase_implCompress-----------------------
8306 //
8307 // Calculate MD5 for single-block byte[] array.
8308 // void com.sun.security.provider.MD5.implCompress(byte[] buf, int ofs)
8309 //
8310 // Calculate SHA (i.e., SHA-1) for single-block byte[] array.
8311 // void com.sun.security.provider.SHA.implCompress(byte[] buf, int ofs)
8312 //
// Calculate SHA2 (i.e., SHA-224 or SHA-256) for single-block byte[] array.
8314 // void com.sun.security.provider.SHA2.implCompress(byte[] buf, int ofs)
8315 //
8316 // Calculate SHA5 (i.e., SHA-384 or SHA-512) for single-block byte[] array.
8317 // void com.sun.security.provider.SHA5.implCompress(byte[] buf, int ofs)
8318 //
8319 // Calculate SHA3 (i.e., SHA3-224 or SHA3-256 or SHA3-384 or SHA3-512) for single-block byte[] array.
8320 // void com.sun.security.provider.SHA3.implCompress(byte[] buf, int ofs)
8321 //
8322 bool LibraryCallKit::inline_digestBase_implCompress(vmIntrinsics::ID id) {
8323 assert(callee()->signature()->size() == 2, "sha_implCompress has 2 parameters");
8324
8325 Node* digestBase_obj = argument(0);
8326 Node* src = argument(1); // type oop
8327 Node* ofs = argument(2); // type int
8328
8329 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
8330 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
8331 // failed array check
8332 return false;
8333 }
8334 // Figure out the size and type of the elements we will be copying.
8335 BasicType src_elem = src_type->elem()->array_element_basic_type();
8336 if (src_elem != T_BYTE) {
8337 return false;
8338 }
8339 // 'src_start' points to src array + offset
8340 src = must_be_not_null(src, true);
8341 Node* src_start = array_element_address(src, ofs, src_elem);
8342 Node* state = nullptr;
8343 Node* block_size = nullptr;
8344 address stubAddr;
8345 const char *stubName;
8346
8347 switch(id) {
8348 case vmIntrinsics::_md5_implCompress:
8349 assert(UseMD5Intrinsics, "need MD5 instruction support");
8350 state = get_state_from_digest_object(digestBase_obj, T_INT);
8351 stubAddr = StubRoutines::md5_implCompress();
8352 stubName = "md5_implCompress";
8353 break;
8354 case vmIntrinsics::_sha_implCompress:
8355 assert(UseSHA1Intrinsics, "need SHA1 instruction support");
8356 state = get_state_from_digest_object(digestBase_obj, T_INT);
8357 stubAddr = StubRoutines::sha1_implCompress();
8358 stubName = "sha1_implCompress";
8359 break;
8360 case vmIntrinsics::_sha2_implCompress:
8361 assert(UseSHA256Intrinsics, "need SHA256 instruction support");
8362 state = get_state_from_digest_object(digestBase_obj, T_INT);
8363 stubAddr = StubRoutines::sha256_implCompress();
8364 stubName = "sha256_implCompress";
8365 break;
8366 case vmIntrinsics::_sha5_implCompress:
8367 assert(UseSHA512Intrinsics, "need SHA512 instruction support");
8368 state = get_state_from_digest_object(digestBase_obj, T_LONG);
8369 stubAddr = StubRoutines::sha512_implCompress();
8370 stubName = "sha512_implCompress";
8371 break;
8372 case vmIntrinsics::_sha3_implCompress:
8373 assert(UseSHA3Intrinsics, "need SHA3 instruction support");
8374 state = get_state_from_digest_object(digestBase_obj, T_LONG);
8375 stubAddr = StubRoutines::sha3_implCompress();
8376 stubName = "sha3_implCompress";
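    // SHA-3 is the only variant whose compression stub also needs the
    // per-instance block size: the sponge absorb rate differs between
    // SHA3-224/256/384/512, so it cannot be baked into a single stub.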
8377 block_size = get_block_size_from_digest_object(digestBase_obj);
8378 if (block_size == nullptr) return false;
8379 break;
8380 default:
8381 fatal_unexpected_iid(id);
8382 return false;
8383 }
8384 if (state == nullptr) return false;
8385
8386 assert(stubAddr != nullptr, "Stub %s is not generated", stubName);
8387 if (stubAddr == nullptr) return false;
8388
8389 // Call the stub.
8390 Node* call;
8391 if (block_size == nullptr) {
8392 call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::digestBase_implCompress_Type(false),
8393 stubAddr, stubName, TypePtr::BOTTOM,
8394 src_start, state);
8395 } else {
8396 call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::digestBase_implCompress_Type(true),
8397 stubAddr, stubName, TypePtr::BOTTOM,
8398 src_start, state, block_size);
8399 }
8400
8401 return true;
8402 }
8403
8404 //------------------------------inline_double_keccak
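//
// Inline a double Keccak permutation: one stub call permutes both long[]
// state arrays ('status0', 'status1'), letting the backend compute the two
// permutations side by side; the stub's int result is returned to Java.
//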
8405 bool LibraryCallKit::inline_double_keccak() {
8406 address stubAddr;
8407 const char *stubName;
8408 assert(UseSHA3Intrinsics, "need SHA3 intrinsics support");
8409 assert(callee()->signature()->size() == 2, "double_keccak has 2 parameters");
8410
8411 stubAddr = StubRoutines::double_keccak();
8412 stubName = "double_keccak";
8413 if (!stubAddr) return false;
8414
8415 Node* status0 = argument(0);
8416 Node* status1 = argument(1);
8417
8418 status0 = must_be_not_null(status0, true);
8419 status1 = must_be_not_null(status1, true);
8420
8421 Node* status0_start = array_element_address(status0, intcon(0), T_LONG);
8422 assert(status0_start, "status0 is null");
8423 Node* status1_start = array_element_address(status1, intcon(0), T_LONG);
8424 assert(status1_start, "status1 is null");
8425 Node* double_keccak = make_runtime_call(RC_LEAF|RC_NO_FP,
8426 OptoRuntime::double_keccak_Type(),
8427 stubAddr, stubName, TypePtr::BOTTOM,
8428 status0_start, status1_start);
8429 // return an int
8430 Node* retvalue = _gvn.transform(new ProjNode(double_keccak, TypeFunc::Parms));
8431 set_result(retvalue);
8432 return true;
8433 }
8434
8435
8436 //------------------------------inline_digestBase_implCompressMB-----------------------
8437 //
8438 // Calculate MD5/SHA/SHA2/SHA5/SHA3 for multi-block byte[] array.
8439 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
8440 //
8441 bool LibraryCallKit::inline_digestBase_implCompressMB(int predicate) {
8442 assert(UseMD5Intrinsics || UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics || UseSHA3Intrinsics,
8443 "need MD5/SHA1/SHA256/SHA512/SHA3 instruction support");
8444 assert((uint)predicate < 5, "sanity");
8445 assert(callee()->signature()->size() == 3, "digestBase_implCompressMB has 3 parameters");
8446
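  // 'predicate' identifies which digest class the preceding predicate check
  // (inline_digestBase_implCompressMB_predicate) matched:
  // 0 = MD5, 1 = SHA-1, 2 = SHA-2, 3 = SHA-5 (SHA-384/512), 4 = SHA-3.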
8447 Node* digestBase_obj = argument(0); // The receiver was checked for null already.
8448 Node* src = argument(1); // byte[] array
8449 Node* ofs = argument(2); // type int
8450 Node* limit = argument(3); // type int
8451
8452 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
8453 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
8454 // failed array check
8455 return false;
8456 }
8457 // Figure out the size and type of the elements we will be copying.
8458 BasicType src_elem = src_type->elem()->array_element_basic_type();
8459 if (src_elem != T_BYTE) {
8460 return false;
8461 }
8462 // 'src_start' points to src array + offset
8463 src = must_be_not_null(src, false);
8464 Node* src_start = array_element_address(src, ofs, src_elem);
8465
8466 const char* klass_digestBase_name = nullptr;
8467 const char* stub_name = nullptr;
8468 address stub_addr = nullptr;
8469 BasicType elem_type = T_INT;
8470
8471 switch (predicate) {
8472 case 0:
8473 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_md5_implCompress)) {
8474 klass_digestBase_name = "sun/security/provider/MD5";
8475 stub_name = "md5_implCompressMB";
8476 stub_addr = StubRoutines::md5_implCompressMB();
8477 }
8478 break;
8479 case 1:
8480 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha_implCompress)) {
8481 klass_digestBase_name = "sun/security/provider/SHA";
8482 stub_name = "sha1_implCompressMB";
8483 stub_addr = StubRoutines::sha1_implCompressMB();
8484 }
8485 break;
8486 case 2:
8487 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha2_implCompress)) {
8488 klass_digestBase_name = "sun/security/provider/SHA2";
8489 stub_name = "sha256_implCompressMB";
8490 stub_addr = StubRoutines::sha256_implCompressMB();
8491 }
8492 break;
8493 case 3:
8494 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha5_implCompress)) {
8495 klass_digestBase_name = "sun/security/provider/SHA5";
8496 stub_name = "sha512_implCompressMB";
8497 stub_addr = StubRoutines::sha512_implCompressMB();
8498 elem_type = T_LONG;
8499 }
8500 break;
8501 case 4:
8502 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha3_implCompress)) {
8503 klass_digestBase_name = "sun/security/provider/SHA3";
8504 stub_name = "sha3_implCompressMB";
8505 stub_addr = StubRoutines::sha3_implCompressMB();
8506 elem_type = T_LONG;
8507 }
8508 break;
8509 default:
8510 fatal("unknown DigestBase intrinsic predicate: %d", predicate);
8511 }
8512 if (klass_digestBase_name != nullptr) {
8513 assert(stub_addr != nullptr, "Stub is generated");
8514 if (stub_addr == nullptr) return false;
8515
8516 // get DigestBase klass to lookup for SHA klass
8517 const TypeInstPtr* tinst = _gvn.type(digestBase_obj)->isa_instptr();
    assert(tinst != nullptr, "digestBase_obj is not an instance");
8519 assert(tinst->is_loaded(), "DigestBase is not loaded");
8520
8521 ciKlass* klass_digestBase = tinst->instance_klass()->find_klass(ciSymbol::make(klass_digestBase_name));
8522 assert(klass_digestBase->is_loaded(), "predicate checks that this class is loaded");
8523 ciInstanceKlass* instklass_digestBase = klass_digestBase->as_instance_klass();
8524 return inline_digestBase_implCompressMB(digestBase_obj, instklass_digestBase, elem_type, stub_addr, stub_name, src_start, ofs, limit);
8525 }
8526 return false;
8527 }
8528
8529 //------------------------------inline_digestBase_implCompressMB-----------------------
8530 bool LibraryCallKit::inline_digestBase_implCompressMB(Node* digestBase_obj, ciInstanceKlass* instklass_digestBase,
8531 BasicType elem_type, address stubAddr, const char *stubName,
8532 Node* src_start, Node* ofs, Node* limit) {
8533 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_digestBase);
8534 const TypeOopPtr* xtype = aklass->cast_to_exactness(false)->as_instance_type()->cast_to_ptr_type(TypePtr::NotNull);
8535 Node* digest_obj = new CheckCastPPNode(control(), digestBase_obj, xtype);
8536 digest_obj = _gvn.transform(digest_obj);
8537
8538 Node* state = get_state_from_digest_object(digest_obj, elem_type);
8539 if (state == nullptr) return false;
8540
8541 Node* block_size = nullptr;
8542 if (strcmp("sha3_implCompressMB", stubName) == 0) {
8543 block_size = get_block_size_from_digest_object(digest_obj);
8544 if (block_size == nullptr) return false;
8545 }
8546
8547 // Call the stub.
8548 Node* call;
8549 if (block_size == nullptr) {
8550 call = make_runtime_call(RC_LEAF|RC_NO_FP,
8551 OptoRuntime::digestBase_implCompressMB_Type(false),
8552 stubAddr, stubName, TypePtr::BOTTOM,
8553 src_start, state, ofs, limit);
8554 } else {
8555 call = make_runtime_call(RC_LEAF|RC_NO_FP,
8556 OptoRuntime::digestBase_implCompressMB_Type(true),
8557 stubAddr, stubName, TypePtr::BOTTOM,
8558 src_start, state, block_size, ofs, limit);
8559 }
8560
8561 // return ofs (int)
8562 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
8563 set_result(result);
8564
8565 return true;
8566 }
8567
8568 //------------------------------inline_galoisCounterMode_AESCrypt-----------------------
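//
// Inline the fused AES-GCM crypt loop: a single stub call performs both the
// CTR-mode AES pass and the GHASH authentication update over 'len' bytes,
// instead of separate GCTR and GHASH passes. The expanded key, GHASH state,
// subkey table and counter block are all extracted from the GCTR/GHASH
// objects below.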
8569 bool LibraryCallKit::inline_galoisCounterMode_AESCrypt() {
8570 assert(UseAES, "need AES instruction support");
8571 address stubAddr = nullptr;
8572 const char *stubName = nullptr;
8573 stubAddr = StubRoutines::galoisCounterMode_AESCrypt();
8574 stubName = "galoisCounterMode_AESCrypt";
8575
8576 if (stubAddr == nullptr) return false;
8577
8578 Node* in = argument(0);
8579 Node* inOfs = argument(1);
8580 Node* len = argument(2);
8581 Node* ct = argument(3);
8582 Node* ctOfs = argument(4);
8583 Node* out = argument(5);
8584 Node* outOfs = argument(6);
8585 Node* gctr_object = argument(7);
8586 Node* ghash_object = argument(8);
8587
8588 // (1) in, ct and out are arrays.
8589 const TypeAryPtr* in_type = in->Value(&_gvn)->isa_aryptr();
8590 const TypeAryPtr* ct_type = ct->Value(&_gvn)->isa_aryptr();
8591 const TypeAryPtr* out_type = out->Value(&_gvn)->isa_aryptr();
8592 assert( in_type != nullptr && in_type->elem() != Type::BOTTOM &&
8593 ct_type != nullptr && ct_type->elem() != Type::BOTTOM &&
8594 out_type != nullptr && out_type->elem() != Type::BOTTOM, "args are strange");
8595
8596 // checks are the responsibility of the caller
8597 Node* in_start = in;
8598 Node* ct_start = ct;
8599 Node* out_start = out;
8600 if (inOfs != nullptr || ctOfs != nullptr || outOfs != nullptr) {
8601 assert(inOfs != nullptr && ctOfs != nullptr && outOfs != nullptr, "");
8602 in_start = array_element_address(in, inOfs, T_BYTE);
8603 ct_start = array_element_address(ct, ctOfs, T_BYTE);
8604 out_start = array_element_address(out, outOfs, T_BYTE);
8605 }
8606
  // If we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
  // (because of the predicated logic executed earlier), so we can cast it here safely.
  // This requires a newer class file that has this array as littleEndian ints; otherwise we revert to java.
8611 Node* embeddedCipherObj = load_field_from_object(gctr_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
8612 Node* counter = load_field_from_object(gctr_object, "counter", "[B");
8613 Node* subkeyHtbl = load_field_from_object(ghash_object, "subkeyHtbl", "[J");
8614 Node* state = load_field_from_object(ghash_object, "state", "[J");
8615
8616 if (embeddedCipherObj == nullptr || counter == nullptr || subkeyHtbl == nullptr || state == nullptr) {
8617 return false;
8618 }
8619 // cast it to what we know it will be at runtime
8620 const TypeInstPtr* tinst = _gvn.type(gctr_object)->isa_instptr();
8621 assert(tinst != nullptr, "GCTR obj is null");
8622 assert(tinst->is_loaded(), "GCTR obj is not loaded");
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
8624 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
8625 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
8626 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
8627 const TypeOopPtr* xtype = aklass->as_instance_type();
8628 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
8629 aescrypt_object = _gvn.transform(aescrypt_object);
8630 // we need to get the start of the aescrypt_object's expanded key array
8631 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, /* is_decrypt */ false);
8632 if (k_start == nullptr) return false;
8633 // similarly, get the start address of the r vector
8634 Node* cnt_start = array_element_address(counter, intcon(0), T_BYTE);
8635 Node* state_start = array_element_address(state, intcon(0), T_LONG);
8636 Node* subkeyHtbl_start = array_element_address(subkeyHtbl, intcon(0), T_LONG);
8637
8638
8639 // Call the stub, passing params
8640 Node* gcmCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
8641 OptoRuntime::galoisCounterMode_aescrypt_Type(),
8642 stubAddr, stubName, TypePtr::BOTTOM,
8643 in_start, len, ct_start, out_start, k_start, state_start, subkeyHtbl_start, cnt_start);
8644
8645 // return cipher length (int)
8646 Node* retvalue = _gvn.transform(new ProjNode(gcmCrypt, TypeFunc::Parms));
8647 set_result(retvalue);
8648
8649 return true;
8650 }
8651
8652 //----------------------------inline_galoisCounterMode_AESCrypt_predicate----------------------------
8653 // Return node representing slow path of predicate check.
8654 // the pseudo code we want to emulate with this predicate is:
8655 // for encryption:
8656 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
8657 // for decryption:
8658 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
8659 // note cipher==plain is more conservative than the original java code but that's OK
8660 //
8661
8662 Node* LibraryCallKit::inline_galoisCounterMode_AESCrypt_predicate() {
8663 // The receiver was checked for null already.
8664 Node* objGCTR = argument(7);
8665 // Load embeddedCipher field of GCTR object.
8666 Node* embeddedCipherObj = load_field_from_object(objGCTR, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
8667 assert(embeddedCipherObj != nullptr, "embeddedCipherObj is null");
8668
8669 // get AESCrypt klass for instanceOf check
8670 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
8671 // will have same classloader as CipherBlockChaining object
8672 const TypeInstPtr* tinst = _gvn.type(objGCTR)->isa_instptr();
8673 assert(tinst != nullptr, "GCTR obj is null");
8674 assert(tinst->is_loaded(), "GCTR obj is not loaded");
8675
8676 // we want to do an instanceof comparison against the AESCrypt class
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
8678 if (!klass_AESCrypt->is_loaded()) {
8679 // if AESCrypt is not even loaded, we never take the intrinsic fast path
8680 Node* ctrl = control();
8681 set_control(top()); // no regular fast path
8682 return ctrl;
8683 }
8684
8685 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
8686 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
8687 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
8688 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
8689 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
8690
8691 return instof_false; // even if it is null
8692 }
8693
8694 //------------------------------get_state_from_digest_object-----------------------
8695 Node * LibraryCallKit::get_state_from_digest_object(Node *digest_object, BasicType elem_type) {
8696 const char* state_type;
8697 switch (elem_type) {
8698 case T_BYTE: state_type = "[B"; break;
8699 case T_INT: state_type = "[I"; break;
8700 case T_LONG: state_type = "[J"; break;
8701 default: ShouldNotReachHere();
8702 }
8703 Node* digest_state = load_field_from_object(digest_object, "state", state_type);
8704 assert (digest_state != nullptr, "wrong version of sun.security.provider.MD5/SHA/SHA2/SHA5/SHA3");
8705 if (digest_state == nullptr) return (Node *) nullptr;
8706
8707 // now have the array, need to get the start address of the state array
8708 Node* state = array_element_address(digest_state, intcon(0), elem_type);
8709 return state;
8710 }
8711
//------------------------------get_block_size_from_digest_object----------------------------------
8713 Node * LibraryCallKit::get_block_size_from_digest_object(Node *digest_object) {
8714 Node* block_size = load_field_from_object(digest_object, "blockSize", "I");
8715 assert (block_size != nullptr, "sanity");
8716 return block_size;
8717 }
8718
8719 //----------------------------inline_digestBase_implCompressMB_predicate----------------------------
8720 // Return node representing slow path of predicate check.
8721 // the pseudo code we want to emulate with this predicate is:
8722 // if (digestBaseObj instanceof MD5/SHA/SHA2/SHA5/SHA3) do_intrinsic, else do_javapath
8723 //
8724 Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) {
8725 assert(UseMD5Intrinsics || UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics || UseSHA3Intrinsics,
8726 "need MD5/SHA1/SHA256/SHA512/SHA3 instruction support");
8727 assert((uint)predicate < 5, "sanity");
8728
8729 // The receiver was checked for null already.
8730 Node* digestBaseObj = argument(0);
8731
8732 // get DigestBase klass for instanceOf check
8733 const TypeInstPtr* tinst = _gvn.type(digestBaseObj)->isa_instptr();
8734 assert(tinst != nullptr, "digestBaseObj is null");
8735 assert(tinst->is_loaded(), "DigestBase is not loaded");
8736
8737 const char* klass_name = nullptr;
8738 switch (predicate) {
8739 case 0:
8740 if (UseMD5Intrinsics) {
8741 // we want to do an instanceof comparison against the MD5 class
8742 klass_name = "sun/security/provider/MD5";
8743 }
8744 break;
8745 case 1:
8746 if (UseSHA1Intrinsics) {
8747 // we want to do an instanceof comparison against the SHA class
8748 klass_name = "sun/security/provider/SHA";
8749 }
8750 break;
8751 case 2:
8752 if (UseSHA256Intrinsics) {
8753 // we want to do an instanceof comparison against the SHA2 class
8754 klass_name = "sun/security/provider/SHA2";
8755 }
8756 break;
8757 case 3:
8758 if (UseSHA512Intrinsics) {
8759 // we want to do an instanceof comparison against the SHA5 class
8760 klass_name = "sun/security/provider/SHA5";
8761 }
8762 break;
8763 case 4:
8764 if (UseSHA3Intrinsics) {
8765 // we want to do an instanceof comparison against the SHA3 class
8766 klass_name = "sun/security/provider/SHA3";
8767 }
8768 break;
8769 default:
    fatal("unknown DigestBase intrinsic predicate: %d", predicate);
8771 }
8772
8773 ciKlass* klass = nullptr;
8774 if (klass_name != nullptr) {
8775 klass = tinst->instance_klass()->find_klass(ciSymbol::make(klass_name));
8776 }
8777 if ((klass == nullptr) || !klass->is_loaded()) {
    // if none of MD5/SHA/SHA2/SHA5/SHA3 is loaded, we never take the intrinsic fast path
8779 Node* ctrl = control();
8780 set_control(top()); // no intrinsic path
8781 return ctrl;
8782 }
8783 ciInstanceKlass* instklass = klass->as_instance_klass();
8784
8785 Node* instof = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass)));
8786 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
8787 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
8788 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
8789
8790 return instof_false; // even if it is null
8791 }
8792
8793 //-------------inline_fma-----------------------------------
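//
// Both fma intrinsics are static, so arguments start at slot 0. Long and
// double arguments occupy two Java slots each, which is why the double
// variant reads slots 0/2/4 while the float variant reads slots 0/1/2.
//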
8794 bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) {
8795 Node *a = nullptr;
8796 Node *b = nullptr;
8797 Node *c = nullptr;
8798 Node* result = nullptr;
8799 switch (id) {
8800 case vmIntrinsics::_fmaD:
8801 assert(callee()->signature()->size() == 6, "fma has 3 parameters of size 2 each.");
8802 // no receiver since it is static method
8803 a = argument(0);
8804 b = argument(2);
8805 c = argument(4);
8806 result = _gvn.transform(new FmaDNode(a, b, c));
8807 break;
8808 case vmIntrinsics::_fmaF:
8809 assert(callee()->signature()->size() == 3, "fma has 3 parameters of size 1 each.");
8810 a = argument(0);
8811 b = argument(1);
8812 c = argument(2);
8813 result = _gvn.transform(new FmaFNode(a, b, c));
8814 break;
8815 default:
8816 fatal_unexpected_iid(id); break;
8817 }
8818 set_result(result);
8819 return true;
8820 }
8821
8822 bool LibraryCallKit::inline_character_compare(vmIntrinsics::ID id) {
8823 // argument(0) is receiver
8824 Node* codePoint = argument(1);
8825 Node* n = nullptr;
8826
8827 switch (id) {
8828 case vmIntrinsics::_isDigit :
8829 n = new DigitNode(control(), codePoint);
8830 break;
8831 case vmIntrinsics::_isLowerCase :
8832 n = new LowerCaseNode(control(), codePoint);
8833 break;
8834 case vmIntrinsics::_isUpperCase :
8835 n = new UpperCaseNode(control(), codePoint);
8836 break;
8837 case vmIntrinsics::_isWhitespace :
8838 n = new WhitespaceNode(control(), codePoint);
8839 break;
8840 default:
8841 fatal_unexpected_iid(id);
8842 }
8843
8844 set_result(_gvn.transform(n));
8845 return true;
8846 }
8847
8848 bool LibraryCallKit::inline_profileBoolean() {
8849 Node* counts = argument(1);
8850 const TypeAryPtr* ary = nullptr;
8851 ciArray* aobj = nullptr;
8852 if (counts->is_Con()
8853 && (ary = counts->bottom_type()->isa_aryptr()) != nullptr
8854 && (aobj = ary->const_oop()->as_array()) != nullptr
8855 && (aobj->length() == 2)) {
8856 // Profile is int[2] where [0] and [1] correspond to false and true value occurrences respectively.
8857 jint false_cnt = aobj->element_value(0).as_int();
8858 jint true_cnt = aobj->element_value(1).as_int();
8859
8860 if (C->log() != nullptr) {
8861 C->log()->elem("observe source='profileBoolean' false='%d' true='%d'",
8862 false_cnt, true_cnt);
8863 }
8864
8865 if (false_cnt + true_cnt == 0) {
8866 // According to profile, never executed.
8867 uncommon_trap_exact(Deoptimization::Reason_intrinsic,
8868 Deoptimization::Action_reinterpret);
8869 return true;
8870 }
8871
    // result is a boolean (0 or 1) and its profile (false_cnt & true_cnt)
    // records how many times each value has been seen.
8874 Node* result = argument(0);
8875 if (false_cnt == 0 || true_cnt == 0) {
8876 // According to profile, one value has been never seen.
8877 int expected_val = (false_cnt == 0) ? 1 : 0;
8878
8879 Node* cmp = _gvn.transform(new CmpINode(result, intcon(expected_val)));
8880 Node* test = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
8881
8882 IfNode* check = create_and_map_if(control(), test, PROB_ALWAYS, COUNT_UNKNOWN);
8883 Node* fast_path = _gvn.transform(new IfTrueNode(check));
8884 Node* slow_path = _gvn.transform(new IfFalseNode(check));
8885
8886 { // Slow path: uncommon trap for never seen value and then reexecute
8887 // MethodHandleImpl::profileBoolean() to bump the count, so JIT knows
8888 // the value has been seen at least once.
8889 PreserveJVMState pjvms(this);
8890 PreserveReexecuteState preexecs(this);
8891 jvms()->set_should_reexecute(true);
8892
8893 set_control(slow_path);
8894 set_i_o(i_o());
8895
8896 uncommon_trap_exact(Deoptimization::Reason_intrinsic,
8897 Deoptimization::Action_reinterpret);
8898 }
      // The guard for the never-seen value enables sharpening of the result and
      // returning a constant. It allows branches on the same value to be
      // eliminated later on.
8902 set_control(fast_path);
8903 result = intcon(expected_val);
8904 }
8905 // Stop profiling.
8906 // MethodHandleImpl::profileBoolean() has profiling logic in its bytecode.
8907 // By replacing method body with profile data (represented as ProfileBooleanNode
8908 // on IR level) we effectively disable profiling.
8909 // It enables full speed execution once optimized code is generated.
8910 Node* profile = _gvn.transform(new ProfileBooleanNode(result, false_cnt, true_cnt));
8911 C->record_for_igvn(profile);
8912 set_result(profile);
8913 return true;
8914 } else {
8915 // Continue profiling.
    // Profile data isn't available at the moment, so execute the method's bytecode version.
8917 // Usually, when GWT LambdaForms are profiled it means that a stand-alone nmethod
8918 // is compiled and counters aren't available since corresponding MethodHandle
8919 // isn't a compile-time constant.
8920 return false;
8921 }
8922 }
8923
8924 bool LibraryCallKit::inline_isCompileConstant() {
8925 Node* n = argument(0);
8926 set_result(n->is_Con() ? intcon(1) : intcon(0));
8927 return true;
8928 }
8929
8930 //------------------------------- inline_getObjectSize --------------------------------------
8931 //
8932 // Calculate the runtime size of the object/array.
8933 // native long sun.instrument.InstrumentationImpl.getObjectSize0(long nativeAgent, Object objectToSize);
8934 //
8935 bool LibraryCallKit::inline_getObjectSize() {
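  // The receiver occupies slot 0 and the long 'nativeAgent' slots 1-2, so
  // the object to size is at slot 3.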
8936 Node* obj = argument(3);
8937 Node* klass_node = load_object_klass(obj);
8938
8939 jint layout_con = Klass::_lh_neutral_value;
8940 Node* layout_val = get_layout_helper(klass_node, layout_con);
8941 int layout_is_con = (layout_val == nullptr);
8942
8943 if (layout_is_con) {
8944 // Layout helper is constant, can figure out things at compile time.
8945
8946 if (Klass::layout_helper_is_instance(layout_con)) {
8947 // Instance case: layout_con contains the size itself.
8948 Node *size = longcon(Klass::layout_helper_size_in_bytes(layout_con));
8949 set_result(size);
8950 } else {
8951 // Array case: size is round(header + element_size*arraylength).
8952 // Since arraylength is different for every array instance, we have to
8953 // compute the whole thing at runtime.
8954
8955 Node* arr_length = load_array_length(obj);
8956
8957 int round_mask = MinObjAlignmentInBytes - 1;
8958 int hsize = Klass::layout_helper_header_size(layout_con);
8959 int eshift = Klass::layout_helper_log2_element_size(layout_con);
8960
8961 if ((round_mask & ~right_n_bits(eshift)) == 0) {
8962 round_mask = 0; // strength-reduce it if it goes away completely
8963 }
8964 assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
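      // Worked example of the computation below, assuming 8-byte object
      // alignment (round_mask = 7): an int[3] with a 16-byte header gives
      // size = ((16 + 7) + (3 << 2)) & ~7 = 35 & ~7 = 32 bytes.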
8965 Node* header_size = intcon(hsize + round_mask);
8966
8967 Node* lengthx = ConvI2X(arr_length);
8968 Node* headerx = ConvI2X(header_size);
8969
8970 Node* abody = lengthx;
8971 if (eshift != 0) {
8972 abody = _gvn.transform(new LShiftXNode(lengthx, intcon(eshift)));
8973 }
8974 Node* size = _gvn.transform( new AddXNode(headerx, abody) );
8975 if (round_mask != 0) {
8976 size = _gvn.transform( new AndXNode(size, MakeConX(~round_mask)) );
8977 }
8978 size = ConvX2L(size);
8979 set_result(size);
8980 }
8981 } else {
8982 // Layout helper is not constant, need to test for array-ness at runtime.
8983
8984 enum { _instance_path = 1, _array_path, PATH_LIMIT };
8985 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
8986 PhiNode* result_val = new PhiNode(result_reg, TypeLong::LONG);
8987 record_for_igvn(result_reg);
8988
8989 Node* array_ctl = generate_array_guard(klass_node, nullptr, &obj);
8990 if (array_ctl != nullptr) {
8991 // Array case: size is round(header + element_size*arraylength).
8992 // Since arraylength is different for every array instance, we have to
8993 // compute the whole thing at runtime.
8994
8995 PreserveJVMState pjvms(this);
8996 set_control(array_ctl);
8997 Node* arr_length = load_array_length(obj);
8998
8999 int round_mask = MinObjAlignmentInBytes - 1;
9000 Node* mask = intcon(round_mask);
9001
9002 Node* hss = intcon(Klass::_lh_header_size_shift);
9003 Node* hsm = intcon(Klass::_lh_header_size_mask);
9004 Node* header_size = _gvn.transform(new URShiftINode(layout_val, hss));
9005 header_size = _gvn.transform(new AndINode(header_size, hsm));
9006 header_size = _gvn.transform(new AddINode(header_size, mask));
9007
9008 // There is no need to mask or shift this value.
9009 // The semantics of LShiftINode include an implicit mask to 0x1F.
9010 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
9011 Node* elem_shift = layout_val;
9012
9013 Node* lengthx = ConvI2X(arr_length);
9014 Node* headerx = ConvI2X(header_size);
9015
9016 Node* abody = _gvn.transform(new LShiftXNode(lengthx, elem_shift));
9017 Node* size = _gvn.transform(new AddXNode(headerx, abody));
9018 if (round_mask != 0) {
9019 size = _gvn.transform(new AndXNode(size, MakeConX(~round_mask)));
9020 }
9021 size = ConvX2L(size);
9022
9023 result_reg->init_req(_array_path, control());
9024 result_val->init_req(_array_path, size);
9025 }
9026
9027 if (!stopped()) {
9028 // Instance case: the layout helper gives us instance size almost directly,
9029 // but we need to mask out the _lh_instance_slow_path_bit.
9030 Node* size = ConvI2X(layout_val);
9031 assert((int) Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
9032 Node* mask = MakeConX(~(intptr_t) right_n_bits(LogBytesPerLong));
9033 size = _gvn.transform(new AndXNode(size, mask));
9034 size = ConvX2L(size);
9035
9036 result_reg->init_req(_instance_path, control());
9037 result_val->init_req(_instance_path, size);
9038 }
9039
9040 set_result(result_reg, result_val);
9041 }
9042
9043 return true;
9044 }
9045
9046 //------------------------------- inline_blackhole --------------------------------------
9047 //
9048 // Make sure all arguments to this node are alive.
9049 // This matches methods that were requested to be blackholed through compile commands.
9050 //
9051 bool LibraryCallKit::inline_blackhole() {
9052 assert(callee()->is_static(), "Should have been checked before: only static methods here");
9053 assert(callee()->is_empty(), "Should have been checked before: only empty methods here");
9054 assert(callee()->holder()->is_loaded(), "Should have been checked before: only methods for loaded classes here");
9055
9056 // Blackhole node pinches only the control, not memory. This allows
9057 // the blackhole to be pinned in the loop that computes blackholed
9058 // values, but have no other side effects, like breaking the optimizations
9059 // across the blackhole.
9060
9061 Node* bh = _gvn.transform(new BlackholeNode(control()));
9062 set_control(_gvn.transform(new ProjNode(bh, TypeFunc::Control)));
9063
9064 // Bind call arguments as blackhole arguments to keep them alive
9065 uint nargs = callee()->arg_size();
9066 for (uint i = 0; i < nargs; i++) {
9067 bh->add_req(argument(i));
9068 }
9069
9070 return true;
9071 }
9072
9073 Node* LibraryCallKit::unbox_fp16_value(const TypeInstPtr* float16_box_type, ciField* field, Node* box) {
9074 const TypeInstPtr* box_type = _gvn.type(box)->isa_instptr();
9075 if (box_type == nullptr || box_type->instance_klass() != float16_box_type->instance_klass()) {
9076 return nullptr; // box klass is not Float16
9077 }
9078
9079 // Null check; get notnull casted pointer
9080 Node* null_ctl = top();
9081 Node* not_null_box = null_check_oop(box, &null_ctl, true);
9082 // If not_null_box is dead, only null-path is taken
9083 if (stopped()) {
9084 set_control(null_ctl);
9085 return nullptr;
9086 }
9087 assert(not_null_box->bottom_type()->is_instptr()->maybe_null() == false, "");
9088 const TypePtr* adr_type = C->alias_type(field)->adr_type();
9089 Node* adr = basic_plus_adr(not_null_box, field->offset_in_bytes());
9090 return access_load_at(not_null_box, adr, adr_type, TypeInt::SHORT, T_SHORT, IN_HEAP);
9091 }
9092
9093 Node* LibraryCallKit::box_fp16_value(const TypeInstPtr* float16_box_type, ciField* field, Node* value) {
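  // The boxing allocation below may deoptimize; marking the bytecode for
  // re-execution keeps the interpreter state consistent if that happens.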
9094 PreserveReexecuteState preexecs(this);
9095 jvms()->set_should_reexecute(true);
9096
9097 const TypeKlassPtr* klass_type = float16_box_type->as_klass_type();
9098 Node* klass_node = makecon(klass_type);
9099 Node* box = new_instance(klass_node);
9100
9101 Node* value_field = basic_plus_adr(box, field->offset_in_bytes());
9102 const TypePtr* value_adr_type = value_field->bottom_type()->is_ptr();
9103
9104 Node* field_store = _gvn.transform(access_store_at(box,
9105 value_field,
9106 value_adr_type,
9107 value,
9108 TypeInt::SHORT,
9109 T_SHORT,
9110 IN_HEAP));
9111 set_memory(field_store, value_adr_type);
9112 return box;
9113 }
9114
9115 bool LibraryCallKit::inline_fp16_operations(vmIntrinsics::ID id, int num_args) {
9116 if (!Matcher::match_rule_supported(Op_ReinterpretS2HF) ||
9117 !Matcher::match_rule_supported(Op_ReinterpretHF2S)) {
9118 return false;
9119 }
9120
9121 const TypeInstPtr* box_type = _gvn.type(argument(0))->isa_instptr();
9122 if (box_type == nullptr || box_type->const_oop() == nullptr) {
9123 return false;
9124 }
9125
9126 ciInstanceKlass* float16_klass = box_type->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
9127 const TypeInstPtr* float16_box_type = TypeInstPtr::make_exact(TypePtr::NotNull, float16_klass);
9128 ciField* field = float16_klass->get_field_by_name(ciSymbols::value_name(),
9129 ciSymbols::short_signature(),
9130 false);
9131 assert(field != nullptr, "");
9132
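  // Each Float16 argument is unboxed to its raw short bits and reinterpreted
  // as a half-float value (ReinterpretS2HF); the computed result is
  // reinterpreted back to short bits (ReinterpretHF2S) and boxed below.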
9133 // Transformed nodes
9134 Node* fld1 = nullptr;
9135 Node* fld2 = nullptr;
9136 Node* fld3 = nullptr;
9137 switch(num_args) {
9138 case 3:
9139 fld3 = unbox_fp16_value(float16_box_type, field, argument(3));
9140 if (fld3 == nullptr) {
9141 return false;
9142 }
9143 fld3 = _gvn.transform(new ReinterpretS2HFNode(fld3));
9144 // fall-through
9145 case 2:
9146 fld2 = unbox_fp16_value(float16_box_type, field, argument(2));
9147 if (fld2 == nullptr) {
9148 return false;
9149 }
9150 fld2 = _gvn.transform(new ReinterpretS2HFNode(fld2));
9151 // fall-through
9152 case 1:
9153 fld1 = unbox_fp16_value(float16_box_type, field, argument(1));
9154 if (fld1 == nullptr) {
9155 return false;
9156 }
9157 fld1 = _gvn.transform(new ReinterpretS2HFNode(fld1));
9158 break;
9159 default: fatal("Unsupported number of arguments %d", num_args);
9160 }
9161
9162 Node* result = nullptr;
9163 switch (id) {
9164 // Unary operations
9165 case vmIntrinsics::_sqrt_float16:
9166 result = _gvn.transform(new SqrtHFNode(C, control(), fld1));
9167 break;
9168 // Ternary operations
9169 case vmIntrinsics::_fma_float16:
9170 result = _gvn.transform(new FmaHFNode(fld1, fld2, fld3));
9171 break;
9172 default:
9173 fatal_unexpected_iid(id);
9174 break;
9175 }
9176 result = _gvn.transform(new ReinterpretHF2SNode(result));
9177 set_result(box_fp16_value(float16_box_type, field, result));
9178 return true;
9179 }
9180