/*
 * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "ci/ciInstanceKlass.hpp"
#include "ci/ciSymbols.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "classfile/vmIntrinsics.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "memory/resourceArea.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/klass.inline.hpp"
#include "oops/layoutKind.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/countbitsnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/library_call.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/unsafe.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mountUnmountDisabler.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

//---------------------------make_vm_intrinsic----------------------------
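// Return a CallGenerator that inlines calls to the given intrinsic method 'm',
// or nullptr if the method is not loaded, the intrinsic is not supported or
// has been disabled, or (for a virtual call site) the intrinsic does not
// support virtual dispatch.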
CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  vmIntrinsicID id = m->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (!m->is_loaded()) {
    // Do not attempt to inline unloaded methods.
    return nullptr;
  }

  C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
  bool is_available = false;

  {
    // For calling is_intrinsic_available(), which checks whether the intrinsic
    // is supported and whether it has been disabled by a flag or compile
    // directive, the compiler must transition to '_thread_in_vm' state
    // because the check accesses VM-internal data.
    VM_ENTRY_MARK;
    methodHandle mh(THREAD, m->get_Method());
    is_available = compiler != nullptr && compiler->is_intrinsic_available(mh, C->directive());
    if (is_available && is_virtual) {
      is_available = vmIntrinsics::does_virtual_dispatch(id);
    }
  }

  if (is_available) {
    assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
    assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
    return new LibraryIntrinsic(m, is_virtual,
                                vmIntrinsics::predicates_needed(id),
                                vmIntrinsics::does_virtual_dispatch(id),
                                id);
  } else {
    return nullptr;
  }
}

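// Attempt to inline the intrinsic call described by 'jvms'. On success the
// result is pushed onto the stack and the caller's JVM state, with exceptions
// transferred, is returned; on bailout nullptr is returned so that a regular
// call can be emitted instead.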
JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
#ifndef PRODUCT
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci = kit.bci();
#ifdef ASSERT
  Node* ctrl = kit.control();
#endif
  // Try to inline the intrinsic.
  if (callee->check_intrinsic_candidate() &&
      kit.try_to_inline(_last_predicate)) {
    const char *inline_msg = is_virtual() ? "(intrinsic, virtual)"
                                          : "(intrinsic)";
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, InliningResult::SUCCESS, inline_msg);
    C->inline_printer()->record(callee, jvms, InliningResult::SUCCESS, inline_msg);
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    // Push the result from the inlined method onto the stack.
    kit.push_result();
    return kit.transfer_exceptions_into_jvms();
  }

  // The intrinsic bailed out
  assert(ctrl == kit.control(), "Control flow was added although the intrinsic bailed out");
  assert(jvms->map() == kit.map(), "Out of sync JVM state");
  if (jvms->has_method()) {
    // Not a root compile.
    const char* msg;
    if (callee->intrinsic_candidate()) {
      msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
    } else {
      msg = is_virtual() ? "failed to inline (intrinsic, virtual), method not annotated"
                         : "failed to inline (intrinsic), method not annotated";
    }
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, InliningResult::FAILURE, msg);
    C->inline_printer()->record(callee, jvms, InliningResult::FAILURE, msg);
  } else {
    // Root compile
    ResourceMark rm;
    stringStream msg_stream;
    msg_stream.print("Did not generate intrinsic %s%s at bci:%d in",
                     vmIntrinsics::name_at(intrinsic_id()),
                     is_virtual() ? " (virtual)" : "", bci);
    const char *msg = msg_stream.freeze();
    log_debug(jit, inlining)("%s", msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      tty->print("%s", msg);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);

  return nullptr;
}

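// Generate the runtime check (predicate) guarding a predicated intrinsic and
// return the control path on which the check fails (the slow path). The
// result may be null, either because the check folded away or because
// predicate generation bailed out.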
Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
  _last_predicate = predicate;
#ifndef PRODUCT
  assert(is_predicated() && predicate < predicates_count(), "sanity");
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Predicate for intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci = kit.bci();

  Node* slow_ctl = kit.try_to_predicate(predicate);
  if (!kit.failing()) {
    const char *inline_msg = is_virtual() ? "(intrinsic, virtual, predicate)"
                                          : "(intrinsic, predicate)";
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, InliningResult::SUCCESS, inline_msg);
    C->inline_printer()->record(callee, jvms, InliningResult::SUCCESS, inline_msg);

    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    return slow_ctl; // Could be null if the check folds.
  }

  // The intrinsic bailed out
  if (jvms->has_method()) {
    // Not a root compile.
    const char* msg = "failed to generate predicate for intrinsic";
    CompileTask::print_inlining_ul(kit.callee(), jvms->depth() - 1, bci, InliningResult::FAILURE, msg);
    C->inline_printer()->record(kit.callee(), jvms, InliningResult::FAILURE, msg);
  } else {
    // Root compile
    ResourceMark rm;
    stringStream msg_stream;
    msg_stream.print("Did not generate intrinsic %s%s at bci:%d in",
                     vmIntrinsics::name_at(intrinsic_id()),
                     is_virtual() ? " (virtual)" : "", bci);
    const char *msg = msg_stream.freeze();
    log_debug(jit, inlining)("%s", msg);
    C->inline_printer()->record(kit.callee(), jvms, InliningResult::FAILURE, msg);
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  return nullptr;
}

bool LibraryCallKit::try_to_inline(int predicate) {
  // Handle symbolic names for otherwise undistinguished boolean switches:
  const bool is_store       = true;
  const bool is_compress    = true;
  const bool is_static      = true;
  const bool is_volatile    = true;

  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  switch (intrinsic_id()) {
  case vmIntrinsics::_hashCode:                 return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
  case vmIntrinsics::_identityHashCode:         return inline_native_hashcode(/*!virtual*/ false, is_static);
  case vmIntrinsics::_getClass:                 return inline_native_getClass();

  case vmIntrinsics::_ceil:
  case vmIntrinsics::_floor:
  case vmIntrinsics::_rint:
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dsinh:
  case vmIntrinsics::_dtanh:
  case vmIntrinsics::_dcbrt:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_fabs:
  case vmIntrinsics::_iabs:
  case vmIntrinsics::_labs:
  case vmIntrinsics::_datan2:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dsqrt_strict:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dpow:
  case vmIntrinsics::_dcopySign:
  case vmIntrinsics::_fcopySign:
  case vmIntrinsics::_dsignum:
  case vmIntrinsics::_roundF:
  case vmIntrinsics::_roundD:
  case vmIntrinsics::_fsignum:                  return inline_math_native(intrinsic_id());

  case vmIntrinsics::_notify:
  case vmIntrinsics::_notifyAll:
    return inline_notify(intrinsic_id());

  case vmIntrinsics::_addExactI:                return inline_math_addExactI(false /* add */);
  case vmIntrinsics::_addExactL:                return inline_math_addExactL(false /* add */);
  case vmIntrinsics::_decrementExactI:          return inline_math_subtractExactI(true /* decrement */);
  case vmIntrinsics::_decrementExactL:          return inline_math_subtractExactL(true /* decrement */);
  case vmIntrinsics::_incrementExactI:          return inline_math_addExactI(true /* increment */);
  case vmIntrinsics::_incrementExactL:          return inline_math_addExactL(true /* increment */);
  case vmIntrinsics::_multiplyExactI:           return inline_math_multiplyExactI();
  case vmIntrinsics::_multiplyExactL:           return inline_math_multiplyExactL();
  case vmIntrinsics::_multiplyHigh:             return inline_math_multiplyHigh();
  case vmIntrinsics::_unsignedMultiplyHigh:     return inline_math_unsignedMultiplyHigh();
  case vmIntrinsics::_negateExactI:             return inline_math_negateExactI();
  case vmIntrinsics::_negateExactL:             return inline_math_negateExactL();
  case vmIntrinsics::_subtractExactI:           return inline_math_subtractExactI(false /* subtract */);
  case vmIntrinsics::_subtractExactL:           return inline_math_subtractExactL(false /* subtract */);

  case vmIntrinsics::_arraycopy:                return inline_arraycopy();

  case vmIntrinsics::_arraySort:                return inline_array_sort();
  case vmIntrinsics::_arrayPartition:           return inline_array_partition();

  case vmIntrinsics::_compareToL:               return inline_string_compareTo(StrIntrinsicNode::LL);
  case vmIntrinsics::_compareToU:               return inline_string_compareTo(StrIntrinsicNode::UU);
  case vmIntrinsics::_compareToLU:              return inline_string_compareTo(StrIntrinsicNode::LU);
  case vmIntrinsics::_compareToUL:              return inline_string_compareTo(StrIntrinsicNode::UL);

  case vmIntrinsics::_indexOfL:                 return inline_string_indexOf(StrIntrinsicNode::LL);
  case vmIntrinsics::_indexOfU:                 return inline_string_indexOf(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfUL:                return inline_string_indexOf(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
  case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
  case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);

  case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);

  case vmIntrinsics::_vectorizedHashCode:       return inline_vectorizedHashCode();

  case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
  case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
  case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
  case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);

  case vmIntrinsics::_compressStringC:
  case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
  case vmIntrinsics::_inflateStringC:
  case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);

  case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,  Relaxed, false);
  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,    Relaxed, false);
  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,   Relaxed, false);
  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,    Relaxed, false);
  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,     Relaxed, false);
  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,    Relaxed, false);
  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,   Relaxed, false);
  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,  Relaxed, false);

  case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,  Relaxed, false);
  case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
  case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,    Relaxed, false);
  case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,   Relaxed, false);
  case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,    Relaxed, false);
  case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,     Relaxed, false);
  case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,    Relaxed, false);
  case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,   Relaxed, false);
  case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,  Relaxed, false);

  case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,  Volatile, false);
  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,    Volatile, false);
  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,   Volatile, false);
  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,    Volatile, false);
  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,     Volatile, false);
  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,    Volatile, false);
  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,   Volatile, false);
  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,  Volatile, false);

  case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,  Volatile, false);
  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,    Volatile, false);
  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,   Volatile, false);
  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,    Volatile, false);
  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,     Volatile, false);
  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,    Volatile, false);
  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,   Volatile, false);
  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,  Volatile, false);

  case vmIntrinsics::_getShortUnaligned:        return inline_unsafe_access(!is_store, T_SHORT,   Relaxed, true);
  case vmIntrinsics::_getCharUnaligned:         return inline_unsafe_access(!is_store, T_CHAR,    Relaxed, true);
  case vmIntrinsics::_getIntUnaligned:          return inline_unsafe_access(!is_store, T_INT,     Relaxed, true);
  case vmIntrinsics::_getLongUnaligned:         return inline_unsafe_access(!is_store, T_LONG,    Relaxed, true);

  case vmIntrinsics::_putShortUnaligned:        return inline_unsafe_access( is_store, T_SHORT,   Relaxed, true);
  case vmIntrinsics::_putCharUnaligned:         return inline_unsafe_access( is_store, T_CHAR,    Relaxed, true);
  case vmIntrinsics::_putIntUnaligned:          return inline_unsafe_access( is_store, T_INT,     Relaxed, true);
  case vmIntrinsics::_putLongUnaligned:         return inline_unsafe_access( is_store, T_LONG,    Relaxed, true);

  case vmIntrinsics::_getReferenceAcquire:      return inline_unsafe_access(!is_store, T_OBJECT,  Acquire, false);
  case vmIntrinsics::_getBooleanAcquire:        return inline_unsafe_access(!is_store, T_BOOLEAN, Acquire, false);
  case vmIntrinsics::_getByteAcquire:           return inline_unsafe_access(!is_store, T_BYTE,    Acquire, false);
  case vmIntrinsics::_getShortAcquire:          return inline_unsafe_access(!is_store, T_SHORT,   Acquire, false);
  case vmIntrinsics::_getCharAcquire:           return inline_unsafe_access(!is_store, T_CHAR,    Acquire, false);
  case vmIntrinsics::_getIntAcquire:            return inline_unsafe_access(!is_store, T_INT,     Acquire, false);
  case vmIntrinsics::_getLongAcquire:           return inline_unsafe_access(!is_store, T_LONG,    Acquire, false);
  case vmIntrinsics::_getFloatAcquire:          return inline_unsafe_access(!is_store, T_FLOAT,   Acquire, false);
  case vmIntrinsics::_getDoubleAcquire:         return inline_unsafe_access(!is_store, T_DOUBLE,  Acquire, false);

  case vmIntrinsics::_putReferenceRelease:      return inline_unsafe_access( is_store, T_OBJECT,  Release, false);
  case vmIntrinsics::_putBooleanRelease:        return inline_unsafe_access( is_store, T_BOOLEAN, Release, false);
  case vmIntrinsics::_putByteRelease:           return inline_unsafe_access( is_store, T_BYTE,    Release, false);
  case vmIntrinsics::_putShortRelease:          return inline_unsafe_access( is_store, T_SHORT,   Release, false);
  case vmIntrinsics::_putCharRelease:           return inline_unsafe_access( is_store, T_CHAR,    Release, false);
  case vmIntrinsics::_putIntRelease:            return inline_unsafe_access( is_store, T_INT,     Release, false);
  case vmIntrinsics::_putLongRelease:           return inline_unsafe_access( is_store, T_LONG,    Release, false);
  case vmIntrinsics::_putFloatRelease:          return inline_unsafe_access( is_store, T_FLOAT,   Release, false);
  case vmIntrinsics::_putDoubleRelease:         return inline_unsafe_access( is_store, T_DOUBLE,  Release, false);

  case vmIntrinsics::_getReferenceOpaque:       return inline_unsafe_access(!is_store, T_OBJECT,  Opaque, false);
  case vmIntrinsics::_getBooleanOpaque:         return inline_unsafe_access(!is_store, T_BOOLEAN, Opaque, false);
  case vmIntrinsics::_getByteOpaque:            return inline_unsafe_access(!is_store, T_BYTE,    Opaque, false);
  case vmIntrinsics::_getShortOpaque:           return inline_unsafe_access(!is_store, T_SHORT,   Opaque, false);
  case vmIntrinsics::_getCharOpaque:            return inline_unsafe_access(!is_store, T_CHAR,    Opaque, false);
  case vmIntrinsics::_getIntOpaque:             return inline_unsafe_access(!is_store, T_INT,     Opaque, false);
  case vmIntrinsics::_getLongOpaque:            return inline_unsafe_access(!is_store, T_LONG,    Opaque, false);
  case vmIntrinsics::_getFloatOpaque:           return inline_unsafe_access(!is_store, T_FLOAT,   Opaque, false);
  case vmIntrinsics::_getDoubleOpaque:          return inline_unsafe_access(!is_store, T_DOUBLE,  Opaque, false);

  case vmIntrinsics::_putReferenceOpaque:       return inline_unsafe_access( is_store, T_OBJECT,  Opaque, false);
  case vmIntrinsics::_putBooleanOpaque:         return inline_unsafe_access( is_store, T_BOOLEAN, Opaque, false);
  case vmIntrinsics::_putByteOpaque:            return inline_unsafe_access( is_store, T_BYTE,    Opaque, false);
  case vmIntrinsics::_putShortOpaque:           return inline_unsafe_access( is_store, T_SHORT,   Opaque, false);
  case vmIntrinsics::_putCharOpaque:            return inline_unsafe_access( is_store, T_CHAR,    Opaque, false);
  case vmIntrinsics::_putIntOpaque:             return inline_unsafe_access( is_store, T_INT,     Opaque, false);
  case vmIntrinsics::_putLongOpaque:            return inline_unsafe_access( is_store, T_LONG,    Opaque, false);
  case vmIntrinsics::_putFloatOpaque:           return inline_unsafe_access( is_store, T_FLOAT,   Opaque, false);
  case vmIntrinsics::_putDoubleOpaque:          return inline_unsafe_access( is_store, T_DOUBLE,  Opaque, false);

  case vmIntrinsics::_getFlatValue:             return inline_unsafe_flat_access(!is_store, Relaxed);
  case vmIntrinsics::_putFlatValue:             return inline_unsafe_flat_access( is_store, Relaxed);

  case vmIntrinsics::_compareAndSetReference:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap, Volatile);
  case vmIntrinsics::_compareAndSetByte:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap, Volatile);
  case vmIntrinsics::_compareAndSetShort:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap, Volatile);
  case vmIntrinsics::_compareAndSetInt:         return inline_unsafe_load_store(T_INT,    LS_cmp_swap, Volatile);
  case vmIntrinsics::_compareAndSetLong:        return inline_unsafe_load_store(T_LONG,   LS_cmp_swap, Volatile);

  case vmIntrinsics::_weakCompareAndSetReferencePlain:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetReferenceAcquire: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetReferenceRelease: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetReference:        return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetBytePlain:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetByteAcquire:      return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetByteRelease:      return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetByte:             return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetShortPlain:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetShortAcquire:     return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetShortRelease:     return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetShort:            return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetIntPlain:         return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetIntAcquire:       return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetIntRelease:       return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetInt:              return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetLongPlain:        return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetLongAcquire:      return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetLongRelease:      return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetLong:             return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Volatile);

  case vmIntrinsics::_compareAndExchangeReference:        return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeReferenceAcquire: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeReferenceRelease: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Release);
  case vmIntrinsics::_compareAndExchangeByte:             return inline_unsafe_load_store(T_BYTE,   LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeByteAcquire:      return inline_unsafe_load_store(T_BYTE,   LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeByteRelease:      return inline_unsafe_load_store(T_BYTE,   LS_cmp_exchange, Release);
  case vmIntrinsics::_compareAndExchangeShort:            return inline_unsafe_load_store(T_SHORT,  LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeShortAcquire:     return inline_unsafe_load_store(T_SHORT,  LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeShortRelease:     return inline_unsafe_load_store(T_SHORT,  LS_cmp_exchange, Release);
  case vmIntrinsics::_compareAndExchangeInt:              return inline_unsafe_load_store(T_INT,    LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeIntAcquire:       return inline_unsafe_load_store(T_INT,    LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeIntRelease:       return inline_unsafe_load_store(T_INT,    LS_cmp_exchange, Release);
  case vmIntrinsics::_compareAndExchangeLong:             return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeLongAcquire:      return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeLongRelease:      return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange, Release);

  case vmIntrinsics::_getAndAddByte:            return inline_unsafe_load_store(T_BYTE,   LS_get_add, Volatile);
  case vmIntrinsics::_getAndAddShort:           return inline_unsafe_load_store(T_SHORT,  LS_get_add, Volatile);
  case vmIntrinsics::_getAndAddInt:             return inline_unsafe_load_store(T_INT,    LS_get_add, Volatile);
  case vmIntrinsics::_getAndAddLong:            return inline_unsafe_load_store(T_LONG,   LS_get_add, Volatile);

  case vmIntrinsics::_getAndSetByte:            return inline_unsafe_load_store(T_BYTE,   LS_get_set, Volatile);
  case vmIntrinsics::_getAndSetShort:           return inline_unsafe_load_store(T_SHORT,  LS_get_set, Volatile);
  case vmIntrinsics::_getAndSetInt:             return inline_unsafe_load_store(T_INT,    LS_get_set, Volatile);
  case vmIntrinsics::_getAndSetLong:            return inline_unsafe_load_store(T_LONG,   LS_get_set, Volatile);
  case vmIntrinsics::_getAndSetReference:       return inline_unsafe_load_store(T_OBJECT, LS_get_set, Volatile);

  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_storeStoreFence:
  case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());

  case vmIntrinsics::_arrayInstanceBaseOffset:  return inline_arrayInstanceBaseOffset();
  case vmIntrinsics::_arrayInstanceIndexScale:  return inline_arrayInstanceIndexScale();
  case vmIntrinsics::_arrayLayout:              return inline_arrayLayout();
  case vmIntrinsics::_getFieldMap:              return inline_getFieldMap();

  case vmIntrinsics::_onSpinWait:               return inline_onspinwait();

  case vmIntrinsics::_currentCarrierThread:     return inline_native_currentCarrierThread();
  case vmIntrinsics::_currentThread:            return inline_native_currentThread();
  case vmIntrinsics::_setCurrentThread:         return inline_native_setCurrentThread();

  case vmIntrinsics::_scopedValueCache:         return inline_native_scopedValueCache();
  case vmIntrinsics::_setScopedValueCache:      return inline_native_setScopedValueCache();

  case vmIntrinsics::_Continuation_pin:         return inline_native_Continuation_pinning(false);
  case vmIntrinsics::_Continuation_unpin:       return inline_native_Continuation_pinning(true);

  case vmIntrinsics::_vthreadEndFirstTransition:   return inline_native_vthread_end_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_end_first_transition_Java()),
                                                                                               "endFirstTransition", true);
  case vmIntrinsics::_vthreadStartFinalTransition: return inline_native_vthread_start_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_start_final_transition_Java()),
                                                                                                 "startFinalTransition", true);
  case vmIntrinsics::_vthreadStartTransition:      return inline_native_vthread_start_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_start_transition_Java()),
                                                                                                 "startTransition", false);
  case vmIntrinsics::_vthreadEndTransition:        return inline_native_vthread_end_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_end_transition_Java()),
                                                                                               "endTransition", false);
#if INCLUDE_JVMTI
  case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
#endif

#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
  case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
  case vmIntrinsics::_jvm_commit:               return inline_native_jvm_commit();
#endif
  case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
  case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
  case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
  case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
  case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
  case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
  case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
  case vmIntrinsics::_setMemory:                return inline_unsafe_setMemory();
  case vmIntrinsics::_getLength:                return inline_native_getLength();
  case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
  case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
  case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
  case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
  case vmIntrinsics::_Preconditions_checkIndex:     return inline_preconditions_checkIndex(T_INT);
  case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
  case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());

  case vmIntrinsics::_allocateUninitializedArray:      return inline_unsafe_newArray(true);
  case vmIntrinsics::_newArray:                        return inline_unsafe_newArray(false);
  case vmIntrinsics::_newNullRestrictedNonAtomicArray: return inline_newArray(/* null_free */ true,  /* atomic */ false);
  case vmIntrinsics::_newNullRestrictedAtomicArray:    return inline_newArray(/* null_free */ true,  /* atomic */ true);
  case vmIntrinsics::_newNullableAtomicArray:          return inline_newArray(/* null_free */ false, /* atomic */ true);
  case vmIntrinsics::_isFlatArray:                     return inline_getArrayProperties(IsFlat);
  case vmIntrinsics::_isNullRestrictedArray:           return inline_getArrayProperties(IsNullRestricted);
  case vmIntrinsics::_isAtomicArray:                   return inline_getArrayProperties(IsAtomic);

  case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();

  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_isHidden:
  case vmIntrinsics::_getSuperclass:            return inline_native_Class_query(intrinsic_id());

  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_floatToIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_doubleToLongBits:
  case vmIntrinsics::_longBitsToDouble:
  case vmIntrinsics::_floatToFloat16:
  case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());
  case vmIntrinsics::_sqrt_float16:             return inline_fp16_operations(intrinsic_id(), 1);
  case vmIntrinsics::_fma_float16:              return inline_fp16_operations(intrinsic_id(), 3);
  case vmIntrinsics::_floatIsFinite:
  case vmIntrinsics::_floatIsInfinite:
  case vmIntrinsics::_doubleIsFinite:
  case vmIntrinsics::_doubleIsInfinite:         return inline_fp_range_check(intrinsic_id());

  case vmIntrinsics::_numberOfLeadingZeros_i:
  case vmIntrinsics::_numberOfLeadingZeros_l:
  case vmIntrinsics::_numberOfTrailingZeros_i:
  case vmIntrinsics::_numberOfTrailingZeros_l:
  case vmIntrinsics::_bitCount_i:
  case vmIntrinsics::_bitCount_l:
  case vmIntrinsics::_reverse_i:
  case vmIntrinsics::_reverse_l:
  case vmIntrinsics::_reverseBytes_i:
  case vmIntrinsics::_reverseBytes_l:
  case vmIntrinsics::_reverseBytes_s:
  case vmIntrinsics::_reverseBytes_c:           return inline_number_methods(intrinsic_id());

  case vmIntrinsics::_compress_i:
  case vmIntrinsics::_compress_l:
  case vmIntrinsics::_expand_i:
  case vmIntrinsics::_expand_l:                 return inline_bitshuffle_methods(intrinsic_id());

  case vmIntrinsics::_compareUnsigned_i:
  case vmIntrinsics::_compareUnsigned_l:        return inline_compare_unsigned(intrinsic_id());

  case vmIntrinsics::_divideUnsigned_i:
  case vmIntrinsics::_divideUnsigned_l:
  case vmIntrinsics::_remainderUnsigned_i:
  case vmIntrinsics::_remainderUnsigned_l:      return inline_divmod_methods(intrinsic_id());

  case vmIntrinsics::_getCallerClass:           return inline_native_Reflection_getCallerClass();

  case vmIntrinsics::_Reference_get0:              return inline_reference_get0();
  case vmIntrinsics::_Reference_refersTo0:         return inline_reference_refersTo0(false);
  case vmIntrinsics::_Reference_reachabilityFence: return inline_reference_reachabilityFence();
  case vmIntrinsics::_PhantomReference_refersTo0:  return inline_reference_refersTo0(true);
  case vmIntrinsics::_Reference_clear0:            return inline_reference_clear0(false);
  case vmIntrinsics::_PhantomReference_clear0:     return inline_reference_clear0(true);

  case vmIntrinsics::_Class_cast:               return inline_Class_cast();

  case vmIntrinsics::_aescrypt_encryptBlock:
  case vmIntrinsics::_aescrypt_decryptBlock:    return inline_aescrypt_Block(intrinsic_id());

  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt(intrinsic_id());

  case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
  case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
    return inline_electronicCodeBook_AESCrypt(intrinsic_id());

  case vmIntrinsics::_counterMode_AESCrypt:
    return inline_counterMode_AESCrypt(intrinsic_id());

  case vmIntrinsics::_galoisCounterMode_AESCrypt:
    return inline_galoisCounterMode_AESCrypt();

  case vmIntrinsics::_md5_implCompress:
  case vmIntrinsics::_sha_implCompress:
  case vmIntrinsics::_sha2_implCompress:
  case vmIntrinsics::_sha5_implCompress:
  case vmIntrinsics::_sha3_implCompress:
    return inline_digestBase_implCompress(intrinsic_id());
  case vmIntrinsics::_double_keccak:
    return inline_double_keccak();

  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB(predicate);

  case vmIntrinsics::_multiplyToLen:
    return inline_multiplyToLen();

  case vmIntrinsics::_squareToLen:
    return inline_squareToLen();

  case vmIntrinsics::_mulAdd:
    return inline_mulAdd();

  case vmIntrinsics::_montgomeryMultiply:
    return inline_montgomeryMultiply();
  case vmIntrinsics::_montgomerySquare:
    return inline_montgomerySquare();

  case vmIntrinsics::_bigIntegerRightShiftWorker:
    return inline_bigIntegerShift(true);
  case vmIntrinsics::_bigIntegerLeftShiftWorker:
    return inline_bigIntegerShift(false);

  case vmIntrinsics::_vectorizedMismatch:
    return inline_vectorizedMismatch();

  case vmIntrinsics::_ghash_processBlocks:
    return inline_ghash_processBlocks();
  case vmIntrinsics::_chacha20Block:
    return inline_chacha20Block();
  case vmIntrinsics::_kyberNtt:
    return inline_kyberNtt();
  case vmIntrinsics::_kyberInverseNtt:
    return inline_kyberInverseNtt();
  case vmIntrinsics::_kyberNttMult:
    return inline_kyberNttMult();
  case vmIntrinsics::_kyberAddPoly_2:
    return inline_kyberAddPoly_2();
  case vmIntrinsics::_kyberAddPoly_3:
    return inline_kyberAddPoly_3();
  case vmIntrinsics::_kyber12To16:
    return inline_kyber12To16();
  case vmIntrinsics::_kyberBarrettReduce:
    return inline_kyberBarrettReduce();
  case vmIntrinsics::_dilithiumAlmostNtt:
    return inline_dilithiumAlmostNtt();
  case vmIntrinsics::_dilithiumAlmostInverseNtt:
    return inline_dilithiumAlmostInverseNtt();
  case vmIntrinsics::_dilithiumNttMult:
    return inline_dilithiumNttMult();
  case vmIntrinsics::_dilithiumMontMulByConstant:
    return inline_dilithiumMontMulByConstant();
  case vmIntrinsics::_dilithiumDecomposePoly:
    return inline_dilithiumDecomposePoly();
  case vmIntrinsics::_base64_encodeBlock:
    return inline_base64_encodeBlock();
  case vmIntrinsics::_base64_decodeBlock:
    return inline_base64_decodeBlock();
  case vmIntrinsics::_poly1305_processBlocks:
    return inline_poly1305_processBlocks();
  case vmIntrinsics::_intpoly_montgomeryMult_P256:
    return inline_intpoly_montgomeryMult_P256();
  case vmIntrinsics::_intpoly_assign:
    return inline_intpoly_assign();
  case vmIntrinsics::_encodeISOArray:
  case vmIntrinsics::_encodeByteISOArray:
    return inline_encodeISOArray(false);
  case vmIntrinsics::_encodeAsciiArray:
    return inline_encodeISOArray(true);

  case vmIntrinsics::_updateCRC32:
    return inline_updateCRC32();
  case vmIntrinsics::_updateBytesCRC32:
    return inline_updateBytesCRC32();
  case vmIntrinsics::_updateByteBufferCRC32:
    return inline_updateByteBufferCRC32();

  case vmIntrinsics::_updateBytesCRC32C:
    return inline_updateBytesCRC32C();
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
    return inline_updateDirectByteBufferCRC32C();

  case vmIntrinsics::_updateBytesAdler32:
    return inline_updateBytesAdler32();
  case vmIntrinsics::_updateByteBufferAdler32:
    return inline_updateByteBufferAdler32();

  case vmIntrinsics::_profileBoolean:
    return inline_profileBoolean();
  case vmIntrinsics::_isCompileConstant:
    return inline_isCompileConstant();

  case vmIntrinsics::_countPositives:
    return inline_countPositives();

  case vmIntrinsics::_fmaD:
  case vmIntrinsics::_fmaF:
    return inline_fma(intrinsic_id());

  case vmIntrinsics::_isDigit:
  case vmIntrinsics::_isLowerCase:
  case vmIntrinsics::_isUpperCase:
  case vmIntrinsics::_isWhitespace:
    return inline_character_compare(intrinsic_id());

  case vmIntrinsics::_min:
  case vmIntrinsics::_max:
  case vmIntrinsics::_min_strict:
  case vmIntrinsics::_max_strict:
  case vmIntrinsics::_minL:
  case vmIntrinsics::_maxL:
  case vmIntrinsics::_minF:
  case vmIntrinsics::_maxF:
  case vmIntrinsics::_minD:
  case vmIntrinsics::_maxD:
  case vmIntrinsics::_minF_strict:
  case vmIntrinsics::_maxF_strict:
  case vmIntrinsics::_minD_strict:
  case vmIntrinsics::_maxD_strict:
    return inline_min_max(intrinsic_id());

  case vmIntrinsics::_VectorUnaryOp:
    return inline_vector_nary_operation(1);
  case vmIntrinsics::_VectorBinaryOp:
    return inline_vector_nary_operation(2);
  case vmIntrinsics::_VectorUnaryLibOp:
    return inline_vector_call(1);
  case vmIntrinsics::_VectorBinaryLibOp:
    return inline_vector_call(2);
  case vmIntrinsics::_VectorTernaryOp:
    return inline_vector_nary_operation(3);
  case vmIntrinsics::_VectorFromBitsCoerced:
    return inline_vector_frombits_coerced();
  case vmIntrinsics::_VectorMaskOp:
    return inline_vector_mask_operation();
  case vmIntrinsics::_VectorLoadOp:
    return inline_vector_mem_operation(/*is_store=*/false);
  case vmIntrinsics::_VectorLoadMaskedOp:
    return inline_vector_mem_masked_operation(/*is_store*/false);
  case vmIntrinsics::_VectorStoreOp:
    return inline_vector_mem_operation(/*is_store=*/true);
  case vmIntrinsics::_VectorStoreMaskedOp:
    return inline_vector_mem_masked_operation(/*is_store=*/true);
  case vmIntrinsics::_VectorGatherOp:
    return inline_vector_gather_scatter(/*is_scatter*/ false);
  case vmIntrinsics::_VectorScatterOp:
    return inline_vector_gather_scatter(/*is_scatter*/ true);
  case vmIntrinsics::_VectorReductionCoerced:
    return inline_vector_reduction();
  case vmIntrinsics::_VectorTest:
    return inline_vector_test();
  case vmIntrinsics::_VectorBlend:
    return inline_vector_blend();
  case vmIntrinsics::_VectorRearrange:
    return inline_vector_rearrange();
  case vmIntrinsics::_VectorSelectFrom:
    return inline_vector_select_from();
  case vmIntrinsics::_VectorCompare:
    return inline_vector_compare();
  case vmIntrinsics::_VectorBroadcastInt:
    return inline_vector_broadcast_int();
  case vmIntrinsics::_VectorConvert:
    return inline_vector_convert();
  case vmIntrinsics::_VectorInsert:
    return inline_vector_insert();
  case vmIntrinsics::_VectorExtract:
    return inline_vector_extract();
  case vmIntrinsics::_VectorCompressExpand:
    return inline_vector_compress_expand();
  case vmIntrinsics::_VectorSelectFromTwoVectorOp:
    return inline_vector_select_from_two_vectors();
  case vmIntrinsics::_IndexVector:
    return inline_index_vector();
  case vmIntrinsics::_IndexPartiallyInUpperRange:
    return inline_index_partially_in_upper_range();

  case vmIntrinsics::_getObjectSize:
    return inline_getObjectSize();

  case vmIntrinsics::_blackhole:
    return inline_blackhole();

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmIntrinsics.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), vmIntrinsics::as_int(intrinsic_id()));
    }
#endif
    return false;
  }
}

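// Like try_to_inline, but emits only the predicate (runtime check) for a
// predicated intrinsic. For unimplemented predicates, the fast path is killed
// by setting control to top, and the original control is returned as the
// slow path.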
Node* LibraryCallKit::try_to_predicate(int predicate) {
  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  switch (intrinsic_id()) {
  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(false);
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(true);
  case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
    return inline_electronicCodeBook_AESCrypt_predicate(false);
  case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
    return inline_electronicCodeBook_AESCrypt_predicate(true);
  case vmIntrinsics::_counterMode_AESCrypt:
    return inline_counterMode_AESCrypt_predicate();
  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB_predicate(predicate);
  case vmIntrinsics::_galoisCounterMode_AESCrypt:
    return inline_galoisCounterMode_AESCrypt_predicate();

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmIntrinsics.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented predicate for intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), vmIntrinsics::as_int(intrinsic_id()));
    }
#endif
    Node* slow_ctl = control();
    set_control(top()); // No fast path intrinsic
    return slow_ctl;
  }
}

//------------------------------set_result-------------------------------
// Helper function for finishing intrinsics.
void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) {
  record_for_igvn(region);
  set_control(_gvn.transform(region));
  set_result( _gvn.transform(value));
  assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity");
}

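// create_bailout/check_bailout manage a region that collects the failing
// paths of guards which are expected to be impossible at runtime. If any
// such path exists after the guards were emitted, check_bailout attaches a
// Halt node to it so an unexpected guard failure stops the VM.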
RegionNode* LibraryCallKit::create_bailout() {
  RegionNode* bailout = new RegionNode(1);
  record_for_igvn(bailout);
  return bailout;
}

bool LibraryCallKit::check_bailout(RegionNode* bailout) {
  if (bailout->req() > 1) {
    bailout = _gvn.transform(bailout)->as_Region();
    Node* frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr));
    Node* halt = _gvn.transform(new HaltNode(bailout, frame, "unexpected guard failure in intrinsic"));
    C->root()->add_req(halt);
  }
  return stopped();
}

//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path. If the test fails
// then a fast path can be taken. (We generally hope it fails.)
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or null if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not null, the slow edge
// is appended to the region.
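// A typical use, as in inline_string_equals below (a sketch, not a new API):
//   Node* bol   = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
//   Node* if_ne = generate_slow_guard(bol, region);
//   if (if_ne != nullptr) {
//     // wire the slow path into 'region' and a result phi
//   }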
Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
  if (stopped()) {
    // Already short circuited.
    return nullptr;
  }

  // Build an if node and its projections.
  // If test is true we take the slow path, which we assume is uncommon.
  if (_gvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken. No need to build this guard.
    return nullptr;
  }

  IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);

  Node* if_slow = _gvn.transform(new IfTrueNode(iff));
  if (if_slow == top()) {
    // The slow branch is never taken. No need to build this guard.
    return nullptr;
  }

  if (region != nullptr)
    region->add_req(if_slow);

  Node* if_fast = _gvn.transform(new IfFalseNode(iff));
  set_control(if_fast);

  return if_slow;
}

inline Node* LibraryCallKit::generate_slow_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_UNLIKELY_MAG(3));
}
inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_FAIR);
}

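// Guard against a negative 'index': branch to 'region' (the slow path) when
// index < 0. On the fast path, if 'pos_index' is non-null it receives 'index'
// cast to the non-negative range [0, maxint].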
inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
                                                     Node** pos_index, bool with_opaque) {
  if (stopped())
    return nullptr;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
    return nullptr;                // index is already adequately typed
  Node* cmp_lt = _gvn.transform(new CmpINode(index, intcon(0)));
  Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
  if (with_opaque) {
    bol_lt = _gvn.transform(new OpaqueConstantBoolNode(C, bol_lt, false));
  }
  Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
  if (is_neg != nullptr && pos_index != nullptr) {
    // Emulate effect of Parse::adjust_map_after_if.
    Node* ccast = new CastIINode(control(), index, TypeInt::POS);
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_neg;
}

// Make sure that 'position' is a valid limit index, in [0..length].
// There are two equivalent plans for checking this:
//   A. (offset + copyLength)  unsigned<=  arrayLength
//   B. offset  <=  (arrayLength - copyLength)
// We require that all of the values above, except for the sum and
// difference, are already known to be non-negative.
// Plan A is robust in the face of overflow, if offset and copyLength
// are both hugely positive.
//
// Plan B is less direct and intuitive, but it does not overflow at
// all, since the difference of two non-negatives is always
// representable. Whenever Java methods must perform the equivalent
// check they generally use Plan B instead of Plan A.
// For the moment we use Plan A.
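// A concrete example of Plan A's overflow robustness: with arrayLength = 10,
// offset = 2,000,000,000 and copyLength = 2,000,000,000, the 32-bit sum wraps
// to a negative int, but reinterpreted as unsigned it is 4,000,000,000, which
// is still (unsigned) greater than 10, so the guard correctly fails. Since
// offset and copyLength are non-negative ints, their sum is always below 2^32
// and can never wrap around to a small unsigned value.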
inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
                                                  Node* subseq_length,
                                                  Node* array_length,
                                                  RegionNode* region,
                                                  bool with_opaque) {
  if (stopped())
    return nullptr;                // already stopped
  bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && subseq_length->eqv_uncast(array_length))
    return nullptr;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset)                // last += offset
    last = _gvn.transform(new AddINode(last, offset));
  Node* cmp_lt = _gvn.transform(new CmpUNode(array_length, last));
  Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
  if (with_opaque) {
    bol_lt = _gvn.transform(new OpaqueConstantBoolNode(C, bol_lt, false));
  }
  Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
  return is_over;
}

// Emit range checks for the given String.value byte array
void LibraryCallKit::generate_string_range_check(Node* array,
                                                 Node* offset,
                                                 Node* count,
                                                 bool char_count,
                                                 RegionNode* region) {
  if (stopped()) {
    return; // already stopped
  }
  if (char_count) {
    // Convert char count to byte count
    count = _gvn.transform(new LShiftINode(count, intcon(1)));
  }
  // Offset and count must not be negative
  generate_negative_guard(offset, region, nullptr, true);
  generate_negative_guard(count, region, nullptr, true);
  // Offset + count must not exceed length of array
  generate_limit_guard(offset, count, load_array_length(array), region, true);
}

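// Load the thread oop stored in the JavaThread field at 'handle_offset':
// first load the handle from thread-local storage, then load the oop
// through it with IN_NATIVE decorators. If 'is_immutable', the field is
// known not to change, so both loads use immutable memory and may float.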
Node* LibraryCallKit::current_thread_helper(Node*& tls_output, ByteSize handle_offset,
                                            bool is_immutable) {
  ciKlass* thread_klass = env()->Thread_klass();
  const Type* thread_type
    = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);

  Node* thread = _gvn.transform(new ThreadLocalNode());
  Node* p = off_heap_plus_addr(thread, in_bytes(handle_offset));
  tls_output = thread;

  Node* thread_obj_handle
    = (is_immutable
       ? LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
                        TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered)
       : make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered));
  thread_obj_handle = _gvn.transform(thread_obj_handle);

  DecoratorSet decorators = IN_NATIVE;
  if (is_immutable) {
    decorators |= C2_IMMUTABLE_MEMORY;
  }
  return access_load(thread_obj_handle, thread_type, T_OBJECT, decorators);
}

//--------------------------generate_current_thread--------------------
Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
  return current_thread_helper(tls_output, JavaThread::threadObj_offset(),
                               /*is_immutable*/false);
}

//--------------------------generate_virtual_thread--------------------
Node* LibraryCallKit::generate_virtual_thread(Node* tls_output) {
  return current_thread_helper(tls_output, JavaThread::vthread_offset(),
                               !C->method()->changes_current_thread());
}

//------------------------------make_string_method_node------------------------
// Helper method for String intrinsic functions. This version is called with
// str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
// characters (depending on the argument encoding 'ae'). cnt1 and cnt2 point
// to Int nodes containing the lengths of str1 and str2.
Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
  Node* result = nullptr;
  switch (opcode) {
  case Op_StrIndexOf:
    result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES),
                                str1_start, cnt1, str2_start, cnt2, ae);
    break;
  case Op_StrComp:
    result = new StrCompNode(control(), memory(TypeAryPtr::BYTES),
                             str1_start, cnt1, str2_start, cnt2, ae);
    break;
  case Op_StrEquals:
    // We already know that cnt1 == cnt2 here (checked in 'inline_string_equals').
    // Use the constant length if there is one, because an optimized match rule may exist.
    result = new StrEqualsNode(control(), memory(TypeAryPtr::BYTES),
                               str1_start, str2_start, cnt2->is_Con() ? cnt2 : cnt1, ae);
    break;
  default:
    ShouldNotReachHere();
    return nullptr;
  }

  // All these intrinsics have checks.
  C->set_has_split_ifs(true); // Has chance for split-if optimization
  clear_upper_avx();

  return _gvn.transform(result);
}

//------------------------------inline_string_compareTo------------------------
bool LibraryCallKit::inline_string_compareTo(StrIntrinsicNode::ArgEnc ae) {
  Node* arg1 = argument(0);
  Node* arg2 = argument(1);

  arg1 = must_be_not_null(arg1, true);
  arg2 = must_be_not_null(arg2, true);

  // Get start addr and length of first argument
  Node* arg1_start = array_element_address(arg1, intcon(0), T_BYTE);
  Node* arg1_cnt   = load_array_length(arg1);

  // Get start addr and length of second argument
  Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
  Node* arg2_cnt   = load_array_length(arg2);

  Node* result = make_string_method_node(Op_StrComp, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
  set_result(result);
  return true;
}

1103 //------------------------------inline_string_equals------------------------
1104 bool LibraryCallKit::inline_string_equals(StrIntrinsicNode::ArgEnc ae) {
1105 Node* arg1 = argument(0);
1106 Node* arg2 = argument(1);
1107
1108 // paths (plus control) merge
1109 RegionNode* region = new RegionNode(3);
1110 Node* phi = new PhiNode(region, TypeInt::BOOL);
1111
1112 if (!stopped()) {
1113
1114 arg1 = must_be_not_null(arg1, true);
1115 arg2 = must_be_not_null(arg2, true);
1116
1117 // Get start addr and length of first argument
1118 Node* arg1_start = array_element_address(arg1, intcon(0), T_BYTE);
1119 Node* arg1_cnt = load_array_length(arg1);
1120
1121 // Get start addr and length of second argument
1122 Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
1123 Node* arg2_cnt = load_array_length(arg2);
1124
1125 // Check for arg1_cnt != arg2_cnt
1126 Node* cmp = _gvn.transform(new CmpINode(arg1_cnt, arg2_cnt));
1127 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
1128 Node* if_ne = generate_slow_guard(bol, nullptr);
1129 if (if_ne != nullptr) {
1130 phi->init_req(2, intcon(0));
1131 region->init_req(2, if_ne);
1132 }
1133
    // The check for count == 0 is done by the assembler code for StrEquals.
1135
1136 if (!stopped()) {
1137 Node* equals = make_string_method_node(Op_StrEquals, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
1138 phi->init_req(1, equals);
1139 region->init_req(1, control());
1140 }
1141 }
1142
1143 // post merge
1144 set_control(_gvn.transform(region));
1145 record_for_igvn(region);
1146
1147 set_result(_gvn.transform(phi));
1148 return true;
1149 }
1150
1151 //------------------------------inline_array_equals----------------------------
1152 bool LibraryCallKit::inline_array_equals(StrIntrinsicNode::ArgEnc ae) {
1153 assert(ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::LL, "unsupported array types");
1154 Node* arg1 = argument(0);
1155 Node* arg2 = argument(1);
1156
1157 const TypeAryPtr* mtype = (ae == StrIntrinsicNode::UU) ? TypeAryPtr::CHARS : TypeAryPtr::BYTES;
1158 set_result(_gvn.transform(new AryEqNode(control(), memory(mtype), arg1, arg2, ae)));
1159 clear_upper_avx();
1160
1161 return true;
1162 }
1163
1164
1165 //------------------------------inline_countPositives------------------------------
// int java.lang.StringCoding.countPositives0(byte[] ba, int off, int len)
1167 bool LibraryCallKit::inline_countPositives() {
1168 assert(callee()->signature()->size() == 3, "countPositives has 3 parameters");
  // No receiver, since this is a static method.
1170 Node* ba = argument(0);
1171 Node* offset = argument(1);
1172 Node* len = argument(2);
1173
1174 ba = must_be_not_null(ba, true);
1175 RegionNode* bailout = create_bailout();
1176 generate_string_range_check(ba, offset, len, false, bailout);
1177 if (check_bailout(bailout)) {
1178 return true;
1179 }
1180
1181 Node* ba_start = array_element_address(ba, offset, T_BYTE);
1182 Node* result = new CountPositivesNode(control(), memory(TypeAryPtr::BYTES), ba_start, len);
1183 set_result(_gvn.transform(result));
1184 clear_upper_avx();
1185 return true;
1186 }
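
// For reference, a plain-Java sketch of what the CountPositives node computes
// (illustrative only, not the actual java.lang.StringCoding source). Roughly,
// the contract only requires the exact value 'len' when no negative byte is
// present; a vectorized stub may stop early otherwise:
//
//   static int countPositives(byte[] ba, int off, int len) {
//     for (int i = off; i < off + len; i++) {
//       if (ba[i] < 0) {
//         return i - off;  // first negative (non-ASCII) byte ends the run
//       }
//     }
//     return len;          // all bytes are non-negative, i.e. ASCII
//   }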
1187
1188 bool LibraryCallKit::inline_preconditions_checkIndex(BasicType bt) {
1189 Node* index = argument(0);
1190 Node* length = bt == T_INT ? argument(1) : argument(2);
1191 if (too_many_traps(Deoptimization::Reason_intrinsic) || too_many_traps(Deoptimization::Reason_range_check)) {
1192 return false;
1193 }
1194
  // Check that length is non-negative
1196 Node* len_pos_cmp = _gvn.transform(CmpNode::make(length, integercon(0, bt), bt));
1197 Node* len_pos_bol = _gvn.transform(new BoolNode(len_pos_cmp, BoolTest::ge));
1198
1199 {
1200 BuildCutout unless(this, len_pos_bol, PROB_MAX);
1201 uncommon_trap(Deoptimization::Reason_intrinsic,
1202 Deoptimization::Action_make_not_entrant);
1203 }
1204
1205 if (stopped()) {
    // Length is known to always be negative during compilation; the IR graph
    // constructed so far is still valid, so return success.
1207 return true;
1208 }
1209
  // Length is now known to be non-negative; add a cast node to make this explicit.
1211 jlong upper_bound = _gvn.type(length)->is_integer(bt)->hi_as_long();
1212 Node* casted_length = ConstraintCastNode::make_cast_for_basic_type(
1213 control(), length, TypeInteger::make(0, upper_bound, Type::WidenMax, bt),
1214 ConstraintCastNode::DependencyType::FloatingNarrowing, bt);
1215 casted_length = _gvn.transform(casted_length);
1216 replace_in_map(length, casted_length);
1217 length = casted_length;
1218
1219 // Use an unsigned comparison for the range check itself
1220 Node* rc_cmp = _gvn.transform(CmpNode::make(index, length, bt, true));
1221 BoolTest::mask btest = BoolTest::lt;
1222 Node* rc_bool = _gvn.transform(new BoolNode(rc_cmp, btest));
1223 RangeCheckNode* rc = new RangeCheckNode(control(), rc_bool, PROB_MAX, COUNT_UNKNOWN);
1224 _gvn.set_type(rc, rc->Value(&_gvn));
1225 if (!rc_bool->is_Con()) {
1226 record_for_igvn(rc);
1227 }
1228 set_control(_gvn.transform(new IfTrueNode(rc)));
1229 {
1230 PreserveJVMState pjvms(this);
1231 set_control(_gvn.transform(new IfFalseNode(rc)));
1232 uncommon_trap(Deoptimization::Reason_range_check,
1233 Deoptimization::Action_make_not_entrant);
1234 }
1235
1236 if (stopped()) {
    // The range check is known to always fail during compilation; the IR graph
    // constructed so far is still valid, so return success.
1238 return true;
1239 }
1240
1241 // index is now known to be >= 0 and < length, cast it
1242 Node* result = ConstraintCastNode::make_cast_for_basic_type(
1243 control(), index, TypeInteger::make(0, upper_bound, Type::WidenMax, bt),
1244 ConstraintCastNode::DependencyType::FloatingNarrowing, bt);
1245 result = _gvn.transform(result);
1246 set_result(result);
1247 replace_in_map(index, result);
1248 return true;
1249 }
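
// For reference, the Java-level semantics being intrinsified here
// (java.util.Objects.checkIndex delegates to Preconditions.checkIndex):
//
//   static int checkIndex(int index, int length) {
//     if (index < 0 || index >= length)
//       throw new IndexOutOfBoundsException(...);
//     return index;
//   }
//
// The graph above folds both bounds tests into a single unsigned comparison:
// once length >= 0 has been established, (index u< length) is equivalent to
// (0 <= index && index < length), because a negative index reinterprets as a
// huge unsigned value.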
1250
1251 //------------------------------inline_string_indexOf------------------------
1252 bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) {
1253 if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
1254 return false;
1255 }
1256 Node* src = argument(0);
1257 Node* tgt = argument(1);
1258
1259 // Make the merge point
1260 RegionNode* result_rgn = new RegionNode(4);
1261 Node* result_phi = new PhiNode(result_rgn, TypeInt::INT);
1262
1263 src = must_be_not_null(src, true);
1264 tgt = must_be_not_null(tgt, true);
1265
1266 // Get start addr and length of source string
1267 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
1268 Node* src_count = load_array_length(src);
1269
1270 // Get start addr and length of substring
1271 Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
1272 Node* tgt_count = load_array_length(tgt);
1273
1274 Node* result = nullptr;
1275 bool call_opt_stub = (StubRoutines::_string_indexof_array[ae] != nullptr);
1276
1277 if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
1278 // Divide src size by 2 if String is UTF16 encoded
1279 src_count = _gvn.transform(new RShiftINode(src_count, intcon(1)));
1280 }
1281 if (ae == StrIntrinsicNode::UU) {
1282 // Divide substring size by 2 if String is UTF16 encoded
1283 tgt_count = _gvn.transform(new RShiftINode(tgt_count, intcon(1)));
1284 }
1285
1286 if (call_opt_stub) {
1287 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::string_IndexOf_Type(),
1288 StubRoutines::_string_indexof_array[ae],
1289 "stringIndexOf", TypePtr::BOTTOM, src_start,
1290 src_count, tgt_start, tgt_count);
1291 result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1292 } else {
1293 result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count,
1294 result_rgn, result_phi, ae);
1295 }
1296 if (result != nullptr) {
1297 result_phi->init_req(3, result);
1298 result_rgn->init_req(3, control());
1299 }
1300 set_control(_gvn.transform(result_rgn));
1301 record_for_igvn(result_rgn);
1302 set_result(_gvn.transform(result_phi));
1303
1304 return true;
1305 }
1306
1307 //-----------------------------inline_string_indexOfI-----------------------
1308 bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) {
1309 if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
1310 return false;
1311 }
1312
1313 assert(callee()->signature()->size() == 5, "String.indexOf() has 5 arguments");
1314 Node* src = argument(0); // byte[]
1315 Node* src_count = argument(1); // char count
1316 Node* tgt = argument(2); // byte[]
1317 Node* tgt_count = argument(3); // char count
1318 Node* from_index = argument(4); // char index
1319
1320 src = must_be_not_null(src, true);
1321 tgt = must_be_not_null(tgt, true);
1322
  // Multiply the char index by 2 to get the byte[] offset if String is UTF16 encoded
1324 Node* src_offset = (ae == StrIntrinsicNode::LL) ? from_index : _gvn.transform(new LShiftINode(from_index, intcon(1)));
1325 src_count = _gvn.transform(new SubINode(src_count, from_index));
1326 Node* src_start = array_element_address(src, src_offset, T_BYTE);
1327 Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
1328
1329 // Range checks
1330 RegionNode* bailout = create_bailout();
1331 generate_string_range_check(src, src_offset, src_count, ae != StrIntrinsicNode::LL, bailout);
1332 generate_string_range_check(tgt, intcon(0), tgt_count, ae == StrIntrinsicNode::UU, bailout);
1333 if (check_bailout(bailout)) {
1334 return true;
1335 }
1336
1337 RegionNode* region = new RegionNode(5);
1338 Node* phi = new PhiNode(region, TypeInt::INT);
1339 Node* result = nullptr;
1340
1341 bool call_opt_stub = (StubRoutines::_string_indexof_array[ae] != nullptr);
1342
1343 if (call_opt_stub) {
1344 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::string_IndexOf_Type(),
1345 StubRoutines::_string_indexof_array[ae],
1346 "stringIndexOf", TypePtr::BOTTOM, src_start,
1347 src_count, tgt_start, tgt_count);
1348 result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1349 } else {
1350 result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count,
1351 region, phi, ae);
1352 }
1353 if (result != nullptr) {
1354 // The result is index relative to from_index if substring was found, -1 otherwise.
1355 // Generate code which will fold into cmove.
1356 Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
1357 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
1358
1359 Node* if_lt = generate_slow_guard(bol, nullptr);
1360 if (if_lt != nullptr) {
1361 // result == -1
1362 phi->init_req(3, result);
1363 region->init_req(3, if_lt);
1364 }
1365 if (!stopped()) {
1366 result = _gvn.transform(new AddINode(result, from_index));
1367 phi->init_req(4, result);
1368 region->init_req(4, control());
1369 }
1370 }
1371
1372 set_control(_gvn.transform(region));
1373 record_for_igvn(region);
1374 set_result(_gvn.transform(phi));
1375 clear_upper_avx();
1376
1377 return true;
1378 }
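
// For reference, the result fix-up above corresponds to this Java-level sketch
// ('searchFrom' is a hypothetical name for the relative search performed by
// the stub or the StrIndexOf node):
//
//   int r = searchFrom(src, fromIndex, tgt);  // index relative to fromIndex, or -1
//   return (r < 0) ? -1 : r + fromIndex;      // rebase to an absolute index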
1379
1380 // Create StrIndexOfNode with fast path checks
1381 Node* LibraryCallKit::make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count,
1382 RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae) {
1383 // Check for substr count > string count
1384 Node* cmp = _gvn.transform(new CmpINode(tgt_count, src_count));
1385 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::gt));
1386 Node* if_gt = generate_slow_guard(bol, nullptr);
1387 if (if_gt != nullptr) {
1388 phi->init_req(1, intcon(-1));
1389 region->init_req(1, if_gt);
1390 }
1391 if (!stopped()) {
1392 // Check for substr count == 0
1393 cmp = _gvn.transform(new CmpINode(tgt_count, intcon(0)));
1394 bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
1395 Node* if_zero = generate_slow_guard(bol, nullptr);
1396 if (if_zero != nullptr) {
1397 phi->init_req(2, intcon(0));
1398 region->init_req(2, if_zero);
1399 }
1400 }
1401 if (!stopped()) {
1402 return make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
1403 }
1404 return nullptr;
1405 }
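
// The two guards emitted above mirror the trivial cases of a substring search,
// consistent with String.indexOf semantics (Java-level sketch):
//
//   if (tgtCount > srcCount) return -1;  // needle longer than haystack
//   if (tgtCount == 0)       return 0;   // empty needle matches immediately
//
// e.g. "abc".indexOf("") == 0 and "ab".indexOf("abc") == -1.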
1406
1407 //-----------------------------inline_string_indexOfChar-----------------------
1408 bool LibraryCallKit::inline_string_indexOfChar(StrIntrinsicNode::ArgEnc ae) {
1409 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1410 return false;
1411 }
1412 if (!Matcher::match_rule_supported(Op_StrIndexOfChar)) {
1413 return false;
1414 }
1415 assert(callee()->signature()->size() == 4, "String.indexOfChar() has 4 arguments");
1416 Node* src = argument(0); // byte[]
1417 Node* int_ch = argument(1);
1418 Node* from_index = argument(2);
1419 Node* max = argument(3);
1420
1421 src = must_be_not_null(src, true);
1422
1423 Node* src_offset = ae == StrIntrinsicNode::L ? from_index : _gvn.transform(new LShiftINode(from_index, intcon(1)));
1424 Node* src_start = array_element_address(src, src_offset, T_BYTE);
1425 Node* src_count = _gvn.transform(new SubINode(max, from_index));
1426
1427 // Range checks
1428 RegionNode* bailout = create_bailout();
1429 generate_string_range_check(src, src_offset, src_count, ae == StrIntrinsicNode::U, bailout);
1430 if (check_bailout(bailout)) {
1431 return true;
1432 }
1433
1434 // Check for int_ch >= 0
1435 Node* int_ch_cmp = _gvn.transform(new CmpINode(int_ch, intcon(0)));
1436 Node* int_ch_bol = _gvn.transform(new BoolNode(int_ch_cmp, BoolTest::ge));
1437 {
1438 BuildCutout unless(this, int_ch_bol, PROB_MAX);
1439 uncommon_trap(Deoptimization::Reason_intrinsic,
1440 Deoptimization::Action_maybe_recompile);
1441 }
1442 if (stopped()) {
1443 return true;
1444 }
1445
1446 RegionNode* region = new RegionNode(3);
1447 Node* phi = new PhiNode(region, TypeInt::INT);
1448
1449 Node* result = new StrIndexOfCharNode(control(), memory(TypeAryPtr::BYTES), src_start, src_count, int_ch, ae);
1450 C->set_has_split_ifs(true); // Has chance for split-if optimization
1451 _gvn.transform(result);
1452
1453 Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
1454 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
1455
1456 Node* if_lt = generate_slow_guard(bol, nullptr);
1457 if (if_lt != nullptr) {
1458 // result == -1
1459 phi->init_req(2, result);
1460 region->init_req(2, if_lt);
1461 }
1462 if (!stopped()) {
1463 result = _gvn.transform(new AddINode(result, from_index));
1464 phi->init_req(1, result);
1465 region->init_req(1, control());
1466 }
1467 set_control(_gvn.transform(region));
1468 record_for_igvn(region);
1469 set_result(_gvn.transform(phi));
1470 clear_upper_avx();
1471
1472 return true;
1473 }
//---------------------------inline_string_copy---------------------
// compress == true  --> generate a compressed copy operation (compress char[]/byte[] to byte[])
//   int StringUTF16.compress0(char[] src, int srcOff, byte[] dst, int dstOff, int len)
//   int StringUTF16.compress0(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
// compress == false --> generate an inflated copy operation (inflate byte[] to char[]/byte[])
//   void StringLatin1.inflate0(byte[] src, int srcOff, char[] dst, int dstOff, int len)
//   void StringLatin1.inflate0(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
1481 bool LibraryCallKit::inline_string_copy(bool compress) {
1482 int nargs = 5; // 2 oops, 3 ints
1483 assert(callee()->signature()->size() == nargs, "string copy has 5 arguments");
1484
1485 Node* src = argument(0);
1486 Node* src_offset = argument(1);
1487 Node* dst = argument(2);
1488 Node* dst_offset = argument(3);
1489 Node* length = argument(4);
1490
1491 // Check for allocation before we add nodes that would confuse
1492 // tightly_coupled_allocation()
1493 AllocateArrayNode* alloc = tightly_coupled_allocation(dst);
1494
1495 // Figure out the size and type of the elements we will be copying.
1496 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
1497 const TypeAryPtr* dst_type = dst->Value(&_gvn)->isa_aryptr();
1498 if (src_type == nullptr || dst_type == nullptr) {
1499 return false;
1500 }
1501 BasicType src_elem = src_type->elem()->array_element_basic_type();
1502 BasicType dst_elem = dst_type->elem()->array_element_basic_type();
1503 assert((compress && dst_elem == T_BYTE && (src_elem == T_BYTE || src_elem == T_CHAR)) ||
1504 (!compress && src_elem == T_BYTE && (dst_elem == T_BYTE || dst_elem == T_CHAR)),
1505 "Unsupported array types for inline_string_copy");
1506
1507 src = must_be_not_null(src, true);
1508 dst = must_be_not_null(dst, true);
1509
1510 // Convert char[] offsets to byte[] offsets
1511 bool convert_src = (compress && src_elem == T_BYTE);
1512 bool convert_dst = (!compress && dst_elem == T_BYTE);
1513 if (convert_src) {
1514 src_offset = _gvn.transform(new LShiftINode(src_offset, intcon(1)));
1515 } else if (convert_dst) {
1516 dst_offset = _gvn.transform(new LShiftINode(dst_offset, intcon(1)));
1517 }
1518
1519 // Range checks
1520 RegionNode* bailout = create_bailout();
1521 generate_string_range_check(src, src_offset, length, convert_src, bailout);
1522 generate_string_range_check(dst, dst_offset, length, convert_dst, bailout);
1523 if (check_bailout(bailout)) {
1524 return true;
1525 }
1526
1527 Node* src_start = array_element_address(src, src_offset, src_elem);
1528 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
1529 // 'src_start' points to src array + scaled offset
1530 // 'dst_start' points to dst array + scaled offset
1531 Node* count = nullptr;
1532 if (compress) {
1533 count = compress_string(src_start, TypeAryPtr::get_array_body_type(src_elem), dst_start, length);
1534 } else {
1535 inflate_string(src_start, dst_start, TypeAryPtr::get_array_body_type(dst_elem), length);
1536 }
1537
1538 if (alloc != nullptr) {
1539 if (alloc->maybe_set_complete(&_gvn)) {
1540 // "You break it, you buy it."
1541 InitializeNode* init = alloc->initialization();
1542 assert(init->is_complete(), "we just did this");
1543 init->set_complete_with_arraycopy();
1544 assert(dst->is_CheckCastPP(), "sanity");
1545 assert(dst->in(0)->in(0) == init, "dest pinned");
1546 }
1547 // Do not let stores that initialize this object be reordered with
1548 // a subsequent store that would make this object accessible by
1549 // other threads.
1550 // Record what AllocateNode this StoreStore protects so that
1551 // escape analysis can go from the MemBarStoreStoreNode to the
1552 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1553 // based on the escape status of the AllocateNode.
1554 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1555 }
1556 if (compress) {
1557 set_result(_gvn.transform(count));
1558 }
1559 clear_upper_avx();
1560
1561 return true;
1562 }
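
// Illustrative Java-level sketch of the compress case (not the actual
// StringUTF16 source; the exact contract of compress0 -- in particular what it
// returns when a char does not fit in Latin-1 -- has varied across JDK
// versions, so treat this as one plausible formulation):
//
//   static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
//     for (int i = 0; i < len; i++) {
//       char c = src[srcOff + i];
//       if (c > 0xFF) {
//         return i;               // not Latin-1 representable: stop early
//       }
//       dst[dstOff + i] = (byte)c;
//     }
//     return len;                 // every char fit in a single byte
//   }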
1563
1564 #ifdef _LP64
1565 #define XTOP ,top() /*additional argument*/
1566 #else //_LP64
1567 #define XTOP /*no additional argument*/
1568 #endif //_LP64
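
// XTOP pads a jlong argument out to the two slots C2's calling convention
// expects on 64-bit: the value edge followed by top() for the high half. So a
// call site such as
//
//   make_runtime_call(..., src_start, dst_start, ConvI2X(length) XTOP);
//
// passes ConvI2L(length), top() on _LP64, and just the int-sized value with no
// extra edge on 32-bit, where ConvI2X is the identity.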
1569
1570 //------------------------inline_string_toBytesU--------------------------
1571 // public static byte[] StringUTF16.toBytes0(char[] value, int off, int len)
1572 bool LibraryCallKit::inline_string_toBytesU() {
1573 // Get the arguments.
1574 assert(callee()->signature()->size() == 3, "character array encoder requires 3 arguments");
1575 Node* value = argument(0);
1576 Node* offset = argument(1);
1577 Node* length = argument(2);
1578
1579 Node* newcopy = nullptr;
1580
1581 // Set the original stack and the reexecute bit for the interpreter to reexecute
1582 // the bytecode that invokes StringUTF16.toBytes0() if deoptimization happens.
1583 { PreserveReexecuteState preexecs(this);
1584 jvms()->set_should_reexecute(true);
1585
1586 value = must_be_not_null(value, true);
1587 RegionNode* bailout = create_bailout();
1588 generate_negative_guard(offset, bailout, nullptr, true);
1589 generate_negative_guard(length, bailout, nullptr, true);
1590 generate_limit_guard(offset, length, load_array_length(value), bailout, true);
    // Make sure that the resulting byte[] length does not exceed Integer.MAX_VALUE
1592 generate_limit_guard(length, intcon(0), intcon(max_jint/2), bailout, true);
1593 if (check_bailout(bailout)) {
1594 return true;
1595 }
1596
1597 Node* size = _gvn.transform(new LShiftINode(length, intcon(1)));
1598 Node* klass_node = makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_BYTE)));
1599 newcopy = new_array(klass_node, size, 0); // no arguments to push
1600 AllocateArrayNode* alloc = tightly_coupled_allocation(newcopy);
1601 guarantee(alloc != nullptr, "created above");
1602
1603 // Calculate starting addresses.
1604 Node* src_start = array_element_address(value, offset, T_CHAR);
1605 Node* dst_start = basic_plus_adr(newcopy, arrayOopDesc::base_offset_in_bytes(T_BYTE));
1606
1607 // Check if dst array address is aligned to HeapWordSize
1608 bool aligned = (arrayOopDesc::base_offset_in_bytes(T_BYTE) % HeapWordSize == 0);
1609 // If true, then check if src array address is aligned to HeapWordSize
1610 if (aligned) {
1611 const TypeInt* toffset = gvn().type(offset)->is_int();
1612 aligned = toffset->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_CHAR) +
1613 toffset->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
1614 }
1615
1616 // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
1617 const char* copyfunc_name = "arraycopy";
1618 address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
1619 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
1620 OptoRuntime::fast_arraycopy_Type(),
1621 copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
1622 src_start, dst_start, ConvI2X(length) XTOP);
    // Do not let reads from the newly allocated array float above the arraycopy.
1624 if (alloc->maybe_set_complete(&_gvn)) {
1625 // "You break it, you buy it."
1626 InitializeNode* init = alloc->initialization();
1627 assert(init->is_complete(), "we just did this");
1628 init->set_complete_with_arraycopy();
1629 assert(newcopy->is_CheckCastPP(), "sanity");
1630 assert(newcopy->in(0)->in(0) == init, "dest pinned");
1631 }
1632 // Do not let stores that initialize this object be reordered with
1633 // a subsequent store that would make this object accessible by
1634 // other threads.
1635 // Record what AllocateNode this StoreStore protects so that
1636 // escape analysis can go from the MemBarStoreStoreNode to the
1637 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1638 // based on the escape status of the AllocateNode.
1639 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1640 } // original reexecute is set back here
1641
1642 C->set_has_split_ifs(true); // Has chance for split-if optimization
1643 if (!stopped()) {
1644 set_result(newcopy);
1645 }
1646 clear_upper_avx();
1647
1648 return true;
1649 }
1650
1651 //------------------------inline_string_getCharsU--------------------------
// public static void StringUTF16.getChars0(byte[] src, int srcBegin, int srcEnd, char[] dst, int dstBegin)
1653 bool LibraryCallKit::inline_string_getCharsU() {
1654 assert(callee()->signature()->size() == 5, "StringUTF16.getChars0() has 5 arguments");
1655 // Get the arguments.
1656 Node* src = argument(0);
1657 Node* src_begin = argument(1);
1658 Node* src_end = argument(2); // exclusive offset (i < src_end)
1659 Node* dst = argument(3);
1660 Node* dst_begin = argument(4);
1661
1662 // Check for allocation before we add nodes that would confuse
1663 // tightly_coupled_allocation()
1664 AllocateArrayNode* alloc = tightly_coupled_allocation(dst);
1665
1666 // Check if a null path was taken unconditionally.
1667 src = must_be_not_null(src, true);
1668 dst = must_be_not_null(dst, true);
1669 if (stopped()) {
1670 return true;
1671 }
1672
1673 // Get length and convert char[] offset to byte[] offset
1674 Node* length = _gvn.transform(new SubINode(src_end, src_begin));
1675 src_begin = _gvn.transform(new LShiftINode(src_begin, intcon(1)));
1676
1677 // Range checks
1678 RegionNode* bailout = create_bailout();
1679 generate_string_range_check(src, src_begin, length, true, bailout);
1680 generate_string_range_check(dst, dst_begin, length, false, bailout);
1681 if (check_bailout(bailout)) {
1682 return true;
1683 }
1684
1685 // Calculate starting addresses.
1686 Node* src_start = array_element_address(src, src_begin, T_BYTE);
1687 Node* dst_start = array_element_address(dst, dst_begin, T_CHAR);
1688
1689 // Check if array addresses are aligned to HeapWordSize
1690 const TypeInt* tsrc = gvn().type(src_begin)->is_int();
1691 const TypeInt* tdst = gvn().type(dst_begin)->is_int();
1692 bool aligned = tsrc->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_BYTE) + tsrc->get_con() * type2aelembytes(T_BYTE)) % HeapWordSize == 0) &&
1693 tdst->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_CHAR) + tdst->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
1694
1695 // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
1696 const char* copyfunc_name = "arraycopy";
1697 address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
1698 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
1699 OptoRuntime::fast_arraycopy_Type(),
1700 copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
1701 src_start, dst_start, ConvI2X(length) XTOP);
  // Do not let reads from the destination array float above the arraycopy.
1703 if (alloc != nullptr) {
1704 if (alloc->maybe_set_complete(&_gvn)) {
1705 // "You break it, you buy it."
1706 InitializeNode* init = alloc->initialization();
1707 assert(init->is_complete(), "we just did this");
1708 init->set_complete_with_arraycopy();
1709 assert(dst->is_CheckCastPP(), "sanity");
1710 assert(dst->in(0)->in(0) == init, "dest pinned");
1711 }
1712 // Do not let stores that initialize this object be reordered with
1713 // a subsequent store that would make this object accessible by
1714 // other threads.
1715 // Record what AllocateNode this StoreStore protects so that
1716 // escape analysis can go from the MemBarStoreStoreNode to the
1717 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1718 // based on the escape status of the AllocateNode.
1719 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1720 } else {
1721 insert_mem_bar(Op_MemBarCPUOrder);
1722 }
1723
1724 C->set_has_split_ifs(true); // Has chance for split-if optimization
1725 return true;
1726 }
1727
1728 //----------------------inline_string_char_access----------------------------
1729 // Store/Load char to/from byte[] array.
1730 // static void StringUTF16.putChar(byte[] val, int index, int c)
1731 // static char StringUTF16.getChar(byte[] val, int index)
1732 bool LibraryCallKit::inline_string_char_access(bool is_store) {
1733 Node* ch;
1734 if (is_store) {
1735 assert(callee()->signature()->size() == 3, "StringUTF16.putChar() has 3 arguments");
1736 ch = argument(2);
1737 } else {
1738 assert(callee()->signature()->size() == 2, "StringUTF16.getChar() has 2 arguments");
1739 ch = nullptr;
1740 }
1741 Node* value = argument(0);
1742 Node* index = argument(1);
1743
  // This intrinsic accesses a byte[] array as a char[] array. Computing the
  // offsets correctly requires matched array shapes.
1746 assert (arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE),
1747 "sanity: byte[] and char[] bases agree");
1748 assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2,
1749 "sanity: byte[] and char[] scales agree");
1750
  // Bail out when getChar over constants is requested: constant folding would
  // reject the mismatched char access over byte[]. Normal inlining of the
  // getChar() Java method would constant fold it nicely instead.
1754 if (!is_store && value->is_Con() && index->is_Con()) {
1755 return false;
1756 }
1757
1758 // Save state and restore on bailout
1759 SavedState old_state(this);
1760
1761 value = must_be_not_null(value, true);
1762
1763 Node* adr = array_element_address(value, index, T_CHAR);
1764 if (adr->is_top()) {
1765 return false;
1766 }
1767 old_state.discard();
1768 if (is_store) {
1769 access_store_at(value, adr, TypeAryPtr::BYTES, ch, TypeInt::CHAR, T_CHAR, IN_HEAP | MO_UNORDERED | C2_MISMATCHED);
1770 } else {
1771 ch = access_load_at(value, adr, TypeAryPtr::BYTES, TypeInt::CHAR, T_CHAR, IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD | C2_UNKNOWN_CONTROL_LOAD);
1772 set_result(ch);
1773 }
1774 return true;
1775 }
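
// For reference, the load side is essentially the following Java-level sketch
// (close to StringUTF16.getChar; HI_BYTE_SHIFT/LO_BYTE_SHIFT are the library's
// endian-dependent shift constants):
//
//   static char getChar(byte[] val, int index) {
//     index <<= 1;  // char index -> byte offset (the 2x scale asserted above)
//     return (char)(((val[index]     & 0xff) << HI_BYTE_SHIFT) |
//                   ((val[index + 1] & 0xff) << LO_BYTE_SHIFT));
//   }
//
// The C2_MISMATCHED decorator records that a T_CHAR value is being read from
// (or written to) memory whose declared element type is T_BYTE.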
1776
1777
//--------------------------inline_double_math---------------------------------
// public static double Math.abs(double)
// public static double Math.sqrt(double)
// public static double Math.ceil(double)
// public static double Math.floor(double)
// public static double Math.rint(double)
// public static long Math.round(double)
// public static double Math.copySign(double, double)
// public static double Math.signum(double)
1784 bool LibraryCallKit::inline_double_math(vmIntrinsics::ID id) {
1785 Node* arg = argument(0);
1786 Node* n = nullptr;
1787 switch (id) {
1788 case vmIntrinsics::_dabs: n = new AbsDNode( arg); break;
1789 case vmIntrinsics::_dsqrt:
1790 case vmIntrinsics::_dsqrt_strict:
1791 n = new SqrtDNode(C, control(), arg); break;
1792 case vmIntrinsics::_ceil: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_ceil); break;
1793 case vmIntrinsics::_floor: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_floor); break;
1794 case vmIntrinsics::_rint: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_rint); break;
1795 case vmIntrinsics::_roundD: n = new RoundDNode(arg); break;
1796 case vmIntrinsics::_dcopySign: n = CopySignDNode::make(_gvn, arg, argument(2)); break;
1797 case vmIntrinsics::_dsignum: n = SignumDNode::make(_gvn, arg); break;
1798 default: fatal_unexpected_iid(id); break;
1799 }
1800 set_result(_gvn.transform(n));
1801 return true;
1802 }
1803
//------------------------------inline_math-----------------------------------
// public static float Math.abs(float)
// public static int Math.abs(int)
// public static long Math.abs(long)
// public static float Math.copySign(float, float)
// public static float Math.signum(float)
// public static int Math.round(float)
1808 bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
1809 Node* arg = argument(0);
1810 Node* n = nullptr;
1811 switch (id) {
1812 case vmIntrinsics::_fabs: n = new AbsFNode( arg); break;
1813 case vmIntrinsics::_iabs: n = new AbsINode( arg); break;
1814 case vmIntrinsics::_labs: n = new AbsLNode( arg); break;
1815 case vmIntrinsics::_fcopySign: n = new CopySignFNode(arg, argument(1)); break;
1816 case vmIntrinsics::_fsignum: n = SignumFNode::make(_gvn, arg); break;
1817 case vmIntrinsics::_roundF: n = new RoundFNode(arg); break;
1818 default: fatal_unexpected_iid(id); break;
1819 }
1820 set_result(_gvn.transform(n));
1821 return true;
1822 }
1823
1824 //------------------------------runtime_math-----------------------------
1825 bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
1826 assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
1827 "must be (DD)D or (D)D type");
1828
1829 // Inputs
1830 Node* a = argument(0);
1831 Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? argument(2) : nullptr;
1832
1833 const TypePtr* no_memory_effects = nullptr;
1834 Node* trig = make_runtime_call(RC_LEAF | RC_PURE, call_type, funcAddr, funcName,
1835 no_memory_effects,
1836 a, top(), b, b ? top() : nullptr);
1837 Node* value = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+0));
1838 #ifdef ASSERT
1839 Node* value_top = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+1));
1840 assert(value_top == top(), "second value must be top");
1841 #endif
1842
1843 set_result(value);
1844 return true;
1845 }
1846
1847 //------------------------------inline_math_pow-----------------------------
1848 bool LibraryCallKit::inline_math_pow() {
1849 Node* base = argument(0);
1850 Node* exp = argument(2);
1851
1852 CallNode* pow = new PowDNode(C, base, exp);
1853 set_predefined_input_for_runtime_call(pow);
1854 pow = _gvn.transform(pow)->as_CallLeafPure();
1855 set_predefined_output_for_runtime_call(pow);
1856 Node* result = _gvn.transform(new ProjNode(pow, TypeFunc::Parms + 0));
1857 record_for_igvn(pow);
1858 set_result(result);
1859 return true;
1860 }
1861
1862 //------------------------------inline_math_native-----------------------------
1863 bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
1864 switch (id) {
1865 case vmIntrinsics::_dsin:
1866 return StubRoutines::dsin() != nullptr ?
1867 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dsin(), "dsin") :
1868 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dsin), "SIN");
1869 case vmIntrinsics::_dcos:
1870 return StubRoutines::dcos() != nullptr ?
1871 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dcos(), "dcos") :
1872 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dcos), "COS");
1873 case vmIntrinsics::_dtan:
1874 return StubRoutines::dtan() != nullptr ?
1875 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dtan(), "dtan") :
1876 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dtan), "TAN");
1877 case vmIntrinsics::_dsinh:
1878 return StubRoutines::dsinh() != nullptr ?
1879 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dsinh(), "dsinh") : false;
1880 case vmIntrinsics::_dtanh:
1881 return StubRoutines::dtanh() != nullptr ?
1882 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dtanh(), "dtanh") : false;
1883 case vmIntrinsics::_dcbrt:
1884 return StubRoutines::dcbrt() != nullptr ?
1885 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dcbrt(), "dcbrt") : false;
1886 case vmIntrinsics::_dexp:
1887 return StubRoutines::dexp() != nullptr ?
1888 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dexp(), "dexp") :
1889 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
1890 case vmIntrinsics::_dlog:
1891 return StubRoutines::dlog() != nullptr ?
1892 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog(), "dlog") :
1893 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog), "LOG");
1894 case vmIntrinsics::_dlog10:
1895 return StubRoutines::dlog10() != nullptr ?
1896 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog10(), "dlog10") :
1897 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), "LOG10");
1898
1899 case vmIntrinsics::_roundD: return Matcher::match_rule_supported(Op_RoundD) ? inline_double_math(id) : false;
1900 case vmIntrinsics::_ceil:
1901 case vmIntrinsics::_floor:
1902 case vmIntrinsics::_rint: return Matcher::match_rule_supported(Op_RoundDoubleMode) ? inline_double_math(id) : false;
1903
1904 case vmIntrinsics::_dsqrt:
1905 case vmIntrinsics::_dsqrt_strict:
1906 return Matcher::match_rule_supported(Op_SqrtD) ? inline_double_math(id) : false;
1907 case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_double_math(id) : false;
1908 case vmIntrinsics::_fabs: return Matcher::match_rule_supported(Op_AbsF) ? inline_math(id) : false;
1909 case vmIntrinsics::_iabs: return Matcher::match_rule_supported(Op_AbsI) ? inline_math(id) : false;
1910 case vmIntrinsics::_labs: return Matcher::match_rule_supported(Op_AbsL) ? inline_math(id) : false;
1911
1912 case vmIntrinsics::_dpow: return inline_math_pow();
1913 case vmIntrinsics::_dcopySign: return inline_double_math(id);
1914 case vmIntrinsics::_fcopySign: return inline_math(id);
1915 case vmIntrinsics::_dsignum: return Matcher::match_rule_supported(Op_SignumD) ? inline_double_math(id) : false;
1916 case vmIntrinsics::_fsignum: return Matcher::match_rule_supported(Op_SignumF) ? inline_math(id) : false;
1917 case vmIntrinsics::_roundF: return Matcher::match_rule_supported(Op_RoundF) ? inline_math(id) : false;
1918
1919 // These intrinsics are not yet correctly implemented
1920 case vmIntrinsics::_datan2:
1921 return false;
1922
1923 default:
1924 fatal_unexpected_iid(id);
1925 return false;
1926 }
1927 }
1928
//----------------------------inline_notify------------------------------------
1930 bool LibraryCallKit::inline_notify(vmIntrinsics::ID id) {
1931 const TypeFunc* ftype = OptoRuntime::monitor_notify_Type();
1932 address func;
1933 if (id == vmIntrinsics::_notify) {
1934 func = OptoRuntime::monitor_notify_Java();
1935 } else {
1936 func = OptoRuntime::monitor_notifyAll_Java();
1937 }
1938 Node* call = make_runtime_call(RC_NO_LEAF, ftype, func, nullptr, TypeRawPtr::BOTTOM, argument(0));
1939 make_slow_call_ex(call, env()->Throwable_klass(), false);
1940 return true;
1941 }
1942
1943
1944 //----------------------------inline_min_max-----------------------------------
1945 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
1946 Node* a = nullptr;
1947 Node* b = nullptr;
1948 Node* n = nullptr;
1949 switch (id) {
1950 case vmIntrinsics::_min:
1951 case vmIntrinsics::_max:
1952 case vmIntrinsics::_minF:
1953 case vmIntrinsics::_maxF:
1954 case vmIntrinsics::_minF_strict:
1955 case vmIntrinsics::_maxF_strict:
1956 case vmIntrinsics::_min_strict:
1957 case vmIntrinsics::_max_strict:
    assert(callee()->signature()->size() == 2, "min/max (I/F) have 2 parameters of size 1 each.");
1959 a = argument(0);
1960 b = argument(1);
1961 break;
1962 case vmIntrinsics::_minD:
1963 case vmIntrinsics::_maxD:
1964 case vmIntrinsics::_minD_strict:
1965 case vmIntrinsics::_maxD_strict:
1966 assert(callee()->signature()->size() == 4, "minD/maxD has 2 parameters of size 2 each.");
1967 a = argument(0);
1968 b = argument(2);
1969 break;
1970 case vmIntrinsics::_minL:
1971 case vmIntrinsics::_maxL:
1972 assert(callee()->signature()->size() == 4, "minL/maxL has 2 parameters of size 2 each.");
1973 a = argument(0);
1974 b = argument(2);
1975 break;
1976 default:
1977 fatal_unexpected_iid(id);
1978 break;
1979 }
1980
1981 switch (id) {
1982 case vmIntrinsics::_min:
1983 case vmIntrinsics::_min_strict:
1984 n = new MinINode(a, b);
1985 break;
1986 case vmIntrinsics::_max:
1987 case vmIntrinsics::_max_strict:
1988 n = new MaxINode(a, b);
1989 break;
1990 case vmIntrinsics::_minF:
1991 case vmIntrinsics::_minF_strict:
1992 n = new MinFNode(a, b);
1993 break;
1994 case vmIntrinsics::_maxF:
1995 case vmIntrinsics::_maxF_strict:
1996 n = new MaxFNode(a, b);
1997 break;
1998 case vmIntrinsics::_minD:
1999 case vmIntrinsics::_minD_strict:
2000 n = new MinDNode(a, b);
2001 break;
2002 case vmIntrinsics::_maxD:
2003 case vmIntrinsics::_maxD_strict:
2004 n = new MaxDNode(a, b);
2005 break;
2006 case vmIntrinsics::_minL:
2007 n = new MinLNode(_gvn.C, a, b);
2008 break;
2009 case vmIntrinsics::_maxL:
2010 n = new MaxLNode(_gvn.C, a, b);
2011 break;
2012 default:
2013 fatal_unexpected_iid(id);
2014 break;
2015 }
2016
2017 set_result(_gvn.transform(n));
2018 return true;
2019 }
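
// The float/double nodes must preserve Java's NaN and signed-zero semantics.
// For reference, Math.max(float, float) is essentially the following sketch
// (very close to the actual library code):
//
//   public static float max(float a, float b) {
//     if (a != a) return a;                    // a is NaN: NaN propagates
//     if (a == 0.0f && b == 0.0f
//         && Float.floatToRawIntBits(a) == Float.floatToRawIntBits(-0.0f)) {
//       return b;                              // max(-0.0f, +0.0f) == +0.0f
//     }
//     return (a >= b) ? a : b;
//   }
//
// so, e.g., Math.max(Float.NaN, 1.0f) is NaN, which is why a plain branch on
// 'a >= b' alone could not implement these intrinsics.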
2020
2021 bool LibraryCallKit::inline_math_mathExact(Node* math, Node* test) {
2022 if (builtin_throw_too_many_traps(Deoptimization::Reason_intrinsic,
2023 env()->ArithmeticException_instance())) {
    // The exception has already been thrown too many times, but we cannot use builtin_throw
    // here (e.g. because we care about backtraces), so bail out of the intrinsic rather than
    // risk deopting yet again.
2026 return false;
2027 }
2028
2029 Node* bol = _gvn.transform( new BoolNode(test, BoolTest::overflow) );
2030 IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
2031 Node* fast_path = _gvn.transform( new IfFalseNode(check));
2032 Node* slow_path = _gvn.transform( new IfTrueNode(check) );
2033
2034 {
2035 PreserveJVMState pjvms(this);
2036 PreserveReexecuteState preexecs(this);
2037 jvms()->set_should_reexecute(true);
2038
2039 set_control(slow_path);
2040 set_i_o(i_o());
2041
2042 builtin_throw(Deoptimization::Reason_intrinsic,
2043 env()->ArithmeticException_instance(),
2044 /*allow_too_many_traps*/ false);
2045 }
2046
2047 set_control(fast_path);
2048 set_result(math);
2049 return true;
2050 }
2051
2052 template <typename OverflowOp>
2053 bool LibraryCallKit::inline_math_overflow(Node* arg1, Node* arg2) {
2054 typedef typename OverflowOp::MathOp MathOp;
2055
2056 MathOp* mathOp = new MathOp(arg1, arg2);
2057 Node* operation = _gvn.transform( mathOp );
2058 Node* ofcheck = _gvn.transform( new OverflowOp(arg1, arg2) );
2059 return inline_math_mathExact(operation, ofcheck);
2060 }
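
// For reference, the MathOp/OverflowOp pairing above matches the shape of the
// Java library code; Math.addExact(int, int) is implemented as:
//
//   public static int addExact(int x, int y) {
//     int r = x + y;
//     // overflow iff both arguments have the opposite sign of the result
//     if (((x ^ r) & (y ^ r)) < 0) {
//       throw new ArithmeticException("integer overflow");
//     }
//     return r;
//   }
//
// The OverflowAdd/Sub/Mul nodes compute the equivalent of that sign test, and
// the plain Add/Sub/Mul MathOp produces the result used on the fast path.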
2061
2062 bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
2063 return inline_math_overflow<OverflowAddINode>(argument(0), is_increment ? intcon(1) : argument(1));
2064 }
2065
2066 bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
2067 return inline_math_overflow<OverflowAddLNode>(argument(0), is_increment ? longcon(1) : argument(2));
2068 }
2069
2070 bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
2071 return inline_math_overflow<OverflowSubINode>(argument(0), is_decrement ? intcon(1) : argument(1));
2072 }
2073
2074 bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
2075 return inline_math_overflow<OverflowSubLNode>(argument(0), is_decrement ? longcon(1) : argument(2));
2076 }
2077
2078 bool LibraryCallKit::inline_math_negateExactI() {
2079 return inline_math_overflow<OverflowSubINode>(intcon(0), argument(0));
2080 }
2081
2082 bool LibraryCallKit::inline_math_negateExactL() {
2083 return inline_math_overflow<OverflowSubLNode>(longcon(0), argument(0));
2084 }
2085
2086 bool LibraryCallKit::inline_math_multiplyExactI() {
2087 return inline_math_overflow<OverflowMulINode>(argument(0), argument(1));
2088 }
2089
2090 bool LibraryCallKit::inline_math_multiplyExactL() {
2091 return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
2092 }
2093
2094 bool LibraryCallKit::inline_math_multiplyHigh() {
2095 set_result(_gvn.transform(new MulHiLNode(argument(0), argument(2))));
2096 return true;
2097 }
2098
2099 bool LibraryCallKit::inline_math_unsignedMultiplyHigh() {
2100 set_result(_gvn.transform(new UMulHiLNode(argument(0), argument(2))));
2101 return true;
2102 }
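
// Usage-level view (Java) of what MulHiL / UMulHiL compute:
//
//   long hi  = Math.multiplyHigh(x, y);         // high 64 bits, signed view
//   long uhi = Math.unsignedMultiplyHigh(x, y); // high 64 bits, unsigned view
//   long lo  = x * y;                           // low 64 bits (same either way)
//
// (hi, lo) together form the exact 128-bit product of x and y.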
2103
2104 inline int
2105 LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset, BasicType type) {
2106 const TypePtr* base_type = TypePtr::NULL_PTR;
2107 if (base != nullptr) base_type = _gvn.type(base)->isa_ptr();
2108 if (base_type == nullptr) {
2109 // Unknown type.
2110 return Type::AnyPtr;
2111 } else if (_gvn.type(base->uncast()) == TypePtr::NULL_PTR) {
2112 // Since this is a null+long form, we have to switch to a rawptr.
2113 base = _gvn.transform(new CastX2PNode(offset));
2114 offset = MakeConX(0);
2115 return Type::RawPtr;
2116 } else if (base_type->base() == Type::RawPtr) {
2117 return Type::RawPtr;
2118 } else if (base_type->isa_oopptr()) {
2119 // Base is never null => always a heap address.
2120 if (!TypePtr::NULL_PTR->higher_equal(base_type)) {
2121 return Type::OopPtr;
2122 }
2123 // Offset is small => always a heap address.
2124 const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2125 if (offset_type != nullptr &&
2126 base_type->offset() == 0 && // (should always be?)
2127 offset_type->_lo >= 0 &&
2128 !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2129 return Type::OopPtr;
2130 } else if (type == T_OBJECT) {
      // An off-heap access to an oop doesn't make any sense, so this has to be
      // an on-heap access.
2133 return Type::OopPtr;
2134 }
2135 // Otherwise, it might either be oop+off or null+addr.
2136 return Type::AnyPtr;
2137 } else {
2138 // No information:
2139 return Type::AnyPtr;
2140 }
2141 }
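
// The three outcomes correspond to the usual Unsafe call shapes on the Java
// side (illustrative; U is a jdk.internal.misc.Unsafe instance):
//
//   U.getInt(obj, fieldOffset);    // base is a non-null oop  -> Type::OopPtr
//   U.getInt(null, rawAddress);    // null + absolute address -> Type::RawPtr
//   U.getInt(maybeNull, offset);   // could be either         -> Type::AnyPtr
//
// AnyPtr is the expensive case: the generated access must be correct for both
// interpretations, unless profiling (see make_unsafe_address below) lets us
// speculate one way or the other.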
2142
2143 Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, BasicType type, bool can_cast) {
2144 Node* uncasted_base = base;
2145 int kind = classify_unsafe_addr(uncasted_base, offset, type);
2146 if (kind == Type::RawPtr) {
2147 return off_heap_plus_addr(uncasted_base, offset);
2148 } else if (kind == Type::AnyPtr) {
2149 assert(base == uncasted_base, "unexpected base change");
2150 if (can_cast) {
2151 if (!_gvn.type(base)->speculative_maybe_null() &&
2152 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
        // According to profiling, this access is always on-heap. Casting the
        // base to non-null, and thus avoiding membars around the access,
        // should allow better optimizations.
2156 Node* null_ctl = top();
2157 base = null_check_oop(base, &null_ctl, true, true, true);
2158 assert(null_ctl->is_top(), "no null control here");
2159 return basic_plus_adr(base, offset);
2160 } else if (_gvn.type(base)->speculative_always_null() &&
2161 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
2162 // According to profiling, this access is always off
2163 // heap.
2164 base = null_assert(base);
2165 Node* raw_base = _gvn.transform(new CastX2PNode(offset));
2166 offset = MakeConX(0);
2167 return off_heap_plus_addr(raw_base, offset);
2168 }
2169 }
    // We don't know if it's an on-heap or off-heap access. Fall back
    // to raw memory access.
2172 Node* raw = _gvn.transform(new CheckCastPPNode(control(), base, TypeRawPtr::BOTTOM));
2173 return off_heap_plus_addr(raw, offset);
2174 } else {
2175 assert(base == uncasted_base, "unexpected base change");
2176 // We know it's an on heap access so base can't be null
2177 if (TypePtr::NULL_PTR->higher_equal(_gvn.type(base))) {
2178 base = must_be_not_null(base, true);
2179 }
2180 return basic_plus_adr(base, offset);
2181 }
2182 }
2183
2184 //--------------------------inline_number_methods-----------------------------
2185 // inline int Integer.numberOfLeadingZeros(int)
2186 // inline int Long.numberOfLeadingZeros(long)
2187 //
2188 // inline int Integer.numberOfTrailingZeros(int)
2189 // inline int Long.numberOfTrailingZeros(long)
2190 //
2191 // inline int Integer.bitCount(int)
2192 // inline int Long.bitCount(long)
2193 //
2194 // inline char Character.reverseBytes(char)
2195 // inline short Short.reverseBytes(short)
2196 // inline int Integer.reverseBytes(int)
2197 // inline long Long.reverseBytes(long)
2198 bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
2199 Node* arg = argument(0);
2200 Node* n = nullptr;
2201 switch (id) {
2202 case vmIntrinsics::_numberOfLeadingZeros_i: n = new CountLeadingZerosINode( arg); break;
2203 case vmIntrinsics::_numberOfLeadingZeros_l: n = new CountLeadingZerosLNode( arg); break;
2204 case vmIntrinsics::_numberOfTrailingZeros_i: n = new CountTrailingZerosINode(arg); break;
2205 case vmIntrinsics::_numberOfTrailingZeros_l: n = new CountTrailingZerosLNode(arg); break;
2206 case vmIntrinsics::_bitCount_i: n = new PopCountINode( arg); break;
2207 case vmIntrinsics::_bitCount_l: n = new PopCountLNode( arg); break;
2208 case vmIntrinsics::_reverseBytes_c: n = new ReverseBytesUSNode( arg); break;
2209 case vmIntrinsics::_reverseBytes_s: n = new ReverseBytesSNode( arg); break;
2210 case vmIntrinsics::_reverseBytes_i: n = new ReverseBytesINode( arg); break;
2211 case vmIntrinsics::_reverseBytes_l: n = new ReverseBytesLNode( arg); break;
2212 case vmIntrinsics::_reverse_i: n = new ReverseINode( arg); break;
2213 case vmIntrinsics::_reverse_l: n = new ReverseLNode( arg); break;
2214 default: fatal_unexpected_iid(id); break;
2215 }
2216 set_result(_gvn.transform(n));
2217 return true;
2218 }
2219
2220 //--------------------------inline_bitshuffle_methods-----------------------------
2221 // inline int Integer.compress(int, int)
2222 // inline int Integer.expand(int, int)
2223 // inline long Long.compress(long, long)
2224 // inline long Long.expand(long, long)
2225 bool LibraryCallKit::inline_bitshuffle_methods(vmIntrinsics::ID id) {
2226 Node* n = nullptr;
2227 switch (id) {
2228 case vmIntrinsics::_compress_i: n = new CompressBitsNode(argument(0), argument(1), TypeInt::INT); break;
2229 case vmIntrinsics::_expand_i: n = new ExpandBitsNode(argument(0), argument(1), TypeInt::INT); break;
2230 case vmIntrinsics::_compress_l: n = new CompressBitsNode(argument(0), argument(2), TypeLong::LONG); break;
2231 case vmIntrinsics::_expand_l: n = new ExpandBitsNode(argument(0), argument(2), TypeLong::LONG); break;
2232 default: fatal_unexpected_iid(id); break;
2233 }
2234 set_result(_gvn.transform(n));
2235 return true;
2236 }
2237
//--------------------------inline_compare_unsigned---------------------------
2239 // inline int Integer.compareUnsigned(int, int)
2240 // inline int Long.compareUnsigned(long, long)
2241 bool LibraryCallKit::inline_compare_unsigned(vmIntrinsics::ID id) {
2242 Node* arg1 = argument(0);
2243 Node* arg2 = (id == vmIntrinsics::_compareUnsigned_l) ? argument(2) : argument(1);
2244 Node* n = nullptr;
2245 switch (id) {
2246 case vmIntrinsics::_compareUnsigned_i: n = new CmpU3Node(arg1, arg2); break;
2247 case vmIntrinsics::_compareUnsigned_l: n = new CmpUL3Node(arg1, arg2); break;
2248 default: fatal_unexpected_iid(id); break;
2249 }
2250 set_result(_gvn.transform(n));
2251 return true;
2252 }
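
// CmpU3/CmpUL3 produce the -1/0/1 result directly. For reference, the Java
// definition being replaced is:
//
//   public static int compareUnsigned(int x, int y) {
//     return Integer.compare(x + Integer.MIN_VALUE, y + Integer.MIN_VALUE);
//   }
//
// Adding MIN_VALUE flips the sign bit, turning an unsigned comparison into a
// signed one.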
2253
2254 //--------------------------inline_unsigned_divmod_methods-----------------------------
2255 // inline int Integer.divideUnsigned(int, int)
2256 // inline int Integer.remainderUnsigned(int, int)
2257 // inline long Long.divideUnsigned(long, long)
2258 // inline long Long.remainderUnsigned(long, long)
2259 bool LibraryCallKit::inline_divmod_methods(vmIntrinsics::ID id) {
2260 Node* n = nullptr;
2261 switch (id) {
2262 case vmIntrinsics::_divideUnsigned_i: {
2263 zero_check_int(argument(1));
      // Compile-time detection of a division by zero
2265 if (stopped()) {
2266 return true; // keep the graph constructed so far
2267 }
2268 n = new UDivINode(control(), argument(0), argument(1));
2269 break;
2270 }
2271 case vmIntrinsics::_divideUnsigned_l: {
2272 zero_check_long(argument(2));
      // Compile-time detection of a division by zero
2274 if (stopped()) {
2275 return true; // keep the graph constructed so far
2276 }
2277 n = new UDivLNode(control(), argument(0), argument(2));
2278 break;
2279 }
2280 case vmIntrinsics::_remainderUnsigned_i: {
2281 zero_check_int(argument(1));
      // Compile-time detection of a division by zero
2283 if (stopped()) {
2284 return true; // keep the graph constructed so far
2285 }
2286 n = new UModINode(control(), argument(0), argument(1));
2287 break;
2288 }
2289 case vmIntrinsics::_remainderUnsigned_l: {
2290 zero_check_long(argument(2));
      // Compile-time detection of a division by zero
2292 if (stopped()) {
2293 return true; // keep the graph constructed so far
2294 }
2295 n = new UModLNode(control(), argument(0), argument(2));
2296 break;
2297 }
2298 default: fatal_unexpected_iid(id); break;
2299 }
2300 set_result(_gvn.transform(n));
2301 return true;
2302 }
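
// A worked example of the unsigned semantics (the zero checks above throw
// ArithmeticException exactly like the signed operators do):
//
//   Integer.divideUnsigned(-2, 3)    == 1431655764  // -2 is 0xFFFFFFFE,
//   Integer.remainderUnsigned(-2, 3) == 2           // i.e. 4294967294
//
// since 4294967294 = 3 * 1431655764 + 2.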
2303
2304 //----------------------------inline_unsafe_access----------------------------
2305
2306 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2307 // Attempt to infer a sharper value type from the offset and base type.
2308 ciKlass* sharpened_klass = nullptr;
2309 bool null_free = false;
2310
2311 // See if it is an instance field, with an object type.
2312 if (alias_type->field() != nullptr) {
2313 if (alias_type->field()->type()->is_klass()) {
2314 sharpened_klass = alias_type->field()->type()->as_klass();
2315 null_free = alias_type->field()->is_null_free();
2316 }
2317 }
2318
2319 const TypeOopPtr* result = nullptr;
2320 // See if it is a narrow oop array.
2321 if (adr_type->isa_aryptr()) {
2322 if (adr_type->offset() >= refArrayOopDesc::base_offset_in_bytes()) {
2323 const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2324 null_free = adr_type->is_aryptr()->is_null_free();
2325 if (elem_type != nullptr && elem_type->is_loaded()) {
2326 // Sharpen the value type.
2327 result = elem_type;
2328 }
2329 }
2330 }
2331
  // The sharpened class might be unloaded if there is no class loader
  // constraint in place.
2334 if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2335 // Sharpen the value type.
2336 result = TypeOopPtr::make_from_klass(sharpened_klass);
2337 if (null_free) {
2338 result = result->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2339 }
2340 }
2341 if (result != nullptr) {
2342 #ifndef PRODUCT
2343 if (C->print_intrinsics() || C->print_inlining()) {
2344 tty->print(" from base type: "); adr_type->dump(); tty->cr();
2345 tty->print(" sharpened value: "); result->dump(); tty->cr();
2346 }
2347 #endif
2348 }
2349 return result;
2350 }
2351
2352 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2353 switch (kind) {
2354 case Relaxed:
2355 return MO_UNORDERED;
2356 case Opaque:
2357 return MO_RELAXED;
2358 case Acquire:
2359 return MO_ACQUIRE;
2360 case Release:
2361 return MO_RELEASE;
2362 case Volatile:
2363 return MO_SEQ_CST;
2364 default:
2365 ShouldNotReachHere();
2366 return 0;
2367 }
2368 }
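
// These access kinds line up with the VarHandle access modes on the Java side
// (illustrative; vh is a VarHandle, o the receiver, v a value):
//
//   vh.get(o);             // Relaxed  -> MO_UNORDERED (plain access)
//   vh.getOpaque(o);       // Opaque   -> MO_RELAXED
//   vh.getAcquire(o);      // Acquire  -> MO_ACQUIRE
//   vh.setRelease(o, v);   // Release  -> MO_RELEASE
//   vh.getVolatile(o);     // Volatile -> MO_SEQ_CST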
2369
2370 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2371 if (callee()->is_static()) return false; // caller must have the capability!
2372 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2373 guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2374 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2375 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2376
2377 if (is_reference_type(type)) {
2378 decorators |= ON_UNKNOWN_OOP_REF;
2379 }
2380
2381 if (unaligned) {
2382 decorators |= C2_UNALIGNED;
2383 }
2384
2385 #ifndef PRODUCT
2386 {
2387 ResourceMark rm;
2388 // Check the signatures.
2389 ciSignature* sig = callee()->signature();
2390 #ifdef ASSERT
2391 if (!is_store) {
2392 // Object getReference(Object base, int/long offset), etc.
2393 BasicType rtype = sig->return_type()->basic_type();
2394 assert(rtype == type, "getter must return the expected value");
2395 assert(sig->count() == 2, "oop getter has 2 arguments");
2396 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2397 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2398 } else {
2399 // void putReference(Object base, int/long offset, Object x), etc.
2400 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2401 assert(sig->count() == 3, "oop putter has 3 arguments");
2402 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2403 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2404 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2405 assert(vtype == type, "putter must accept the expected value");
2406 }
2407 #endif // ASSERT
2408 }
2409 #endif //PRODUCT
2410
2411 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2412
2413 Node* receiver = argument(0); // type: oop
2414
2415 // Build address expression.
2416 Node* heap_base_oop = top();
2417
2418 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2419 Node* base = argument(1); // type: oop
2420 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2421 Node* offset = argument(2); // type: long
2422 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2423 // to be plain byte offsets, which are also the same as those accepted
2424 // by oopDesc::field_addr.
2425 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2426 "fieldOffset must be byte-scaled");
2427
2428 if (base->is_InlineType()) {
2429 assert(!is_store, "InlineTypeNodes are non-larval value objects");
2430 InlineTypeNode* vt = base->as_InlineType();
2431 if (offset->is_Con()) {
2432 long off = find_long_con(offset, 0);
2433 ciInlineKlass* vk = vt->type()->inline_klass();
2434 if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2435 return false;
2436 }
2437
2438 ciField* field = vk->get_non_flat_field_by_offset(off);
2439 if (field != nullptr) {
2440 BasicType bt = type2field[field->type()->basic_type()];
2441 if (bt == T_ARRAY || bt == T_NARROWOOP) {
2442 bt = T_OBJECT;
2443 }
2444 if (bt == type && !field->is_flat()) {
2445 Node* value = vt->field_value_by_offset(off, false);
2446 const Type* value_type = _gvn.type(value);
2447 if (value->is_InlineType()) {
2448 value = value->as_InlineType()->adjust_scalarization_depth(this);
2449 } else if (value_type->is_inlinetypeptr()) {
2450 value = InlineTypeNode::make_from_oop(this, value, value_type->inline_klass());
2451 }
2452 set_result(value);
2453 return true;
2454 }
2455 }
2456 }
2457 {
2458 // Re-execute the unsafe access if allocation triggers deoptimization.
2459 PreserveReexecuteState preexecs(this);
2460 jvms()->set_should_reexecute(true);
2461 vt = vt->buffer(this);
2462 }
2463 base = vt->get_oop();
2464 }
2465
2466 // 32-bit machines ignore the high half!
2467 offset = ConvL2X(offset);
2468
2469 // Save state and restore on bailout
2470 SavedState old_state(this);
2471
2472 Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2473 assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2474
2475 if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2476 if (type != T_OBJECT) {
2477 decorators |= IN_NATIVE; // off-heap primitive access
2478 } else {
2479 return false; // off-heap oop accesses are not supported
2480 }
2481 } else {
2482 heap_base_oop = base; // on-heap or mixed access
2483 }
2484
  // Can base be null? If not, the access is always on-heap.
2486 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2487
2488 if (!can_access_non_heap) {
2489 decorators |= IN_HEAP;
2490 }
2491
2492 Node* val = is_store ? argument(4) : nullptr;
2493
2494 const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2495 if (adr_type == TypePtr::NULL_PTR) {
2496 return false; // off-heap access with zero address
2497 }
2498
2499 // Try to categorize the address.
2500 Compile::AliasType* alias_type = C->alias_type(adr_type);
2501 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2502
2503 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2504 alias_type->adr_type() == TypeAryPtr::RANGE) {
2505 return false; // not supported
2506 }
2507
2508 bool mismatched = false;
2509 BasicType bt = T_ILLEGAL;
2510 ciField* field = nullptr;
2511 if (adr_type->isa_instptr()) {
2512 const TypeInstPtr* instptr = adr_type->is_instptr();
2513 ciInstanceKlass* k = instptr->instance_klass();
2514 int off = instptr->offset();
2515 if (instptr->const_oop() != nullptr &&
2516 k == ciEnv::current()->Class_klass() &&
2517 instptr->offset() >= (k->size_helper() * wordSize)) {
2518 k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2519 field = k->get_field_by_offset(off, true);
2520 } else {
2521 field = k->get_non_flat_field_by_offset(off);
2522 }
2523 if (field != nullptr) {
2524 bt = type2field[field->type()->basic_type()];
2525 }
2526 if (bt != alias_type->basic_type()) {
2527 // Type mismatch. Is it an access to a nested flat field?
2528 field = k->get_field_by_offset(off, false);
2529 if (field != nullptr) {
2530 bt = type2field[field->type()->basic_type()];
2531 }
2532 }
2533 assert(bt == alias_type->basic_type(), "should match");
2534 } else {
2535 bt = alias_type->basic_type();
2536 }
2537
2538 if (bt != T_ILLEGAL) {
2539 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2540 if (bt == T_BYTE && adr_type->isa_aryptr()) {
      // Alias type doesn't differentiate between byte[] and boolean[].
      // Use address type to get the element type.
      bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
    }
    if (is_reference_type(bt, true)) {
      // accessing an array field with getReference is not a mismatch
      bt = T_OBJECT;
    }
    if ((bt == T_OBJECT) != (type == T_OBJECT)) {
      // Don't intrinsify mismatched object accesses
      return false;
    }
    mismatched = (bt != type);
  } else if (alias_type->adr_type()->isa_oopptr()) {
    mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
  }

  old_state.discard();
  assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");

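  // Example of a mismatched access (illustrative sketch): reading a whole
  // long out of a byte[] via U.getLong(byteArray, off) accesses T_LONG
  // memory through an alias typed as T_BYTE, so bt != type above.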
  if (mismatched) {
    decorators |= C2_MISMATCHED;
  }

  // First guess at the value type.
  const Type* value_type = Type::get_const_basic_type(type);

  // Figure out the memory ordering.
  decorators |= mo_decorator_for_access_kind(kind);

  if (!is_store) {
    if (type == T_OBJECT) {
      const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
      if (tjp != nullptr) {
        value_type = tjp;
      }
    }
  }

  receiver = null_check(receiver);
  if (stopped()) {
    return true;
  }
  // Heap pointers get a null-check from the interpreter,
  // as a courtesy. However, this is not guaranteed by Unsafe,
  // and it is not possible to fully distinguish unintended nulls
  // from intended ones in this API.

  if (!is_store) {
    Node* p = nullptr;
    // Try to constant fold a load from a constant field

    if (heap_base_oop != top() && field != nullptr && field->is_constant() && !field->is_flat() && !mismatched) {
      // final or stable field
      p = make_constant_from_field(field, heap_base_oop);
    }

    if (p == nullptr) { // Could not constant fold the load
      p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
      const TypeOopPtr* ptr = value_type->make_oopptr();
      if (ptr != nullptr && ptr->is_inlinetypeptr()) {
        // Load a non-flattened inline type from memory
        p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass());
      }
      // Normalize the value returned by getBoolean in the following cases
      if (type == T_BOOLEAN &&
          (mismatched ||
           heap_base_oop == top() ||                  // - heap_base_oop is null or
           (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
                                                      //   and the unsafe access is made at a large offset
                                                      //   (i.e., larger than the maximum offset necessary for any
                                                      //   field access)
          ) {
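        // In these cases a byte-sized read may observe any bit pattern
        // (e.g. 0x02), so the graph below maps every non-zero value to 1.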
        IdealKit ideal = IdealKit(this);
#define __ ideal.
        IdealVariable normalized_result(ideal);
        __ declarations_done();
        __ set(normalized_result, p);
        __ if_then(p, BoolTest::ne, ideal.ConI(0));
          __ set(normalized_result, ideal.ConI(1));
        ideal.end_if();
        final_sync(ideal);
        p = __ value(normalized_result);
#undef __
      }
    }
    if (type == T_ADDRESS) {
      p = gvn().transform(new CastP2XNode(nullptr, p));
      p = ConvX2UL(p);
    }
    // The load node has the control of the preceding MemBarCPUOrder. All
    // following nodes will have the control of the MemBarCPUOrder inserted at
    // the end of this method. So, pushing the load onto the stack at a later
    // point is fine.
    set_result(p);
  } else {
    if (bt == T_ADDRESS) {
      // Repackage the long as a pointer.
      val = ConvL2X(val);
      val = gvn().transform(new CastX2PNode(val));
    }
    access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
  }

  return true;
}

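// inline_unsafe_flat_access compiles flat (inline-type) field and array
// element accesses. Signature sketch, inferred from the asserts below
// (shapes only, not the authoritative jdk.internal.misc.Unsafe declarations):
//
//   Object get...(Object base, long offset, int layoutKind, Class<?> valueType);
//   void   put...(Object base, long offset, int layoutKind, Class<?> valueType, Object value);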
bool LibraryCallKit::inline_unsafe_flat_access(bool is_store, AccessKind kind) {
#ifdef ASSERT
  {
    ResourceMark rm;
    // Check the signatures.
    ciSignature* sig = callee()->signature();
    assert(sig->type_at(0)->basic_type() == T_OBJECT, "base should be object, but is %s", type2name(sig->type_at(0)->basic_type()));
    assert(sig->type_at(1)->basic_type() == T_LONG, "offset should be long, but is %s", type2name(sig->type_at(1)->basic_type()));
    assert(sig->type_at(2)->basic_type() == T_INT, "layout kind should be int, but is %s", type2name(sig->type_at(2)->basic_type()));
    assert(sig->type_at(3)->basic_type() == T_OBJECT, "value klass should be object, but is %s", type2name(sig->type_at(3)->basic_type()));
    if (is_store) {
      assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value, but returns %s", type2name(sig->return_type()->basic_type()));
      assert(sig->count() == 5, "flat putter should have 5 arguments, but has %d", sig->count());
      assert(sig->type_at(4)->basic_type() == T_OBJECT, "put value should be object, but is %s", type2name(sig->type_at(4)->basic_type()));
    } else {
      assert(sig->return_type()->basic_type() == T_OBJECT, "getter must return an object, but returns %s", type2name(sig->return_type()->basic_type()));
      assert(sig->count() == 4, "flat getter should have 4 arguments, but has %d", sig->count());
    }
  }
#endif // ASSERT

  assert(kind == Relaxed, "Only plain accesses for now");
  if (callee()->is_static()) {
    // caller must have the capability!
    return false;
  }
  C->set_has_unsafe_access(true);

  const TypeInstPtr* value_klass_node = _gvn.type(argument(5))->isa_instptr();
  if (value_klass_node == nullptr || value_klass_node->const_oop() == nullptr) {
    // parameter valueType is not a constant
    return false;
  }
  ciType* mirror_type = value_klass_node->const_oop()->as_instance()->java_mirror_type();
  if (!mirror_type->is_inlinetype()) {
    // Dead code
    return false;
  }
  ciInlineKlass* value_klass = mirror_type->as_inline_klass();

  const TypeInt* layout_type = _gvn.type(argument(4))->isa_int();
  if (layout_type == nullptr || !layout_type->is_con()) {
    // parameter layoutKind is not a constant
    return false;
  }
  assert(layout_type->get_con() >= static_cast<int>(LayoutKind::REFERENCE) &&
         layout_type->get_con() < static_cast<int>(LayoutKind::UNKNOWN),
         "invalid layoutKind %d", layout_type->get_con());
  LayoutKind layout = static_cast<LayoutKind>(layout_type->get_con());
  assert(layout == LayoutKind::REFERENCE || layout == LayoutKind::NULL_FREE_NON_ATOMIC_FLAT ||
         layout == LayoutKind::NULL_FREE_ATOMIC_FLAT || layout == LayoutKind::NULLABLE_ATOMIC_FLAT,
         "unexpected layoutKind %d", layout_type->get_con());

  null_check(argument(0));
  if (stopped()) {
    return true;
  }

  Node* base = must_be_not_null(argument(1), true);
  Node* offset = argument(2);
  const Type* base_type = _gvn.type(base);

  Node* ptr;
  bool immutable_memory = false;
  DecoratorSet decorators = C2_UNSAFE_ACCESS | IN_HEAP | MO_UNORDERED;
  if (base_type->isa_instptr()) {
    const TypeLong* offset_type = _gvn.type(offset)->isa_long();
    if (offset_type == nullptr || !offset_type->is_con()) {
      // Offset into a non-array should be a constant
      decorators |= C2_MISMATCHED;
    } else {
      int offset_con = checked_cast<int>(offset_type->get_con());
      ciInstanceKlass* base_klass = base_type->is_instptr()->instance_klass();
      ciField* field = base_klass->get_non_flat_field_by_offset(offset_con);
      if (field == nullptr) {
        assert(!base_klass->is_final(), "non-existent field at offset %d of class %s", offset_con, base_klass->name()->as_utf8());
        decorators |= C2_MISMATCHED;
      } else {
        assert(field->type() == value_klass, "field at offset %d of %s is of type %s, but valueType is %s",
               offset_con, base_klass->name()->as_utf8(), field->type()->name(), value_klass->name()->as_utf8());
        immutable_memory = field->is_strict() && field->is_final();

        if (base->is_InlineType()) {
          assert(!is_store, "Cannot store into a non-larval value object");
          set_result(base->as_InlineType()->field_value_by_offset(offset_con, false));
          return true;
        }
      }
    }

    if (base->is_InlineType()) {
      assert(!is_store, "Cannot store into a non-larval value object");
      base = base->as_InlineType()->buffer(this, true);
    }
    ptr = basic_plus_adr(base, ConvL2X(offset));
  } else if (base_type->isa_aryptr()) {
    decorators |= IS_ARRAY;
    if (layout == LayoutKind::REFERENCE) {
      if (!base_type->is_aryptr()->is_not_flat()) {
        const TypeAryPtr* array_type = base_type->is_aryptr()->cast_to_not_flat();
        // TODO 8350865 This should be a CheckCastPP, can we add a test?
        Node* new_base = _gvn.transform(new CastPPNode(control(), base, array_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing));
        replace_in_map(base, new_base);
        base = new_base;
      }
      ptr = basic_plus_adr(base, ConvL2X(offset));
    } else {
      if (UseArrayFlattening) {
        // Flat array must have an exact type
        bool is_null_free = !LayoutKindHelper::is_nullable_flat(layout);
        bool is_atomic = LayoutKindHelper::is_atomic_flat(layout);
        Node* new_base = cast_to_flat_array_exact(base, value_klass, is_null_free, is_atomic);
        replace_in_map(base, new_base);
        base = new_base;
        ptr = basic_plus_adr(base, ConvL2X(offset));
        const TypeAryPtr* ptr_type = _gvn.type(ptr)->is_aryptr();
        if (ptr_type->field_offset().get() != 0) {
          // TODO 8350865 This should be a CheckCastPP, can we add a test?
          ptr = _gvn.transform(new CastPPNode(control(), ptr, ptr_type->with_field_offset(0), ConstraintCastNode::DependencyType::NonFloatingNarrowing));
        }
      } else {
        uncommon_trap(Deoptimization::Reason_intrinsic,
                      Deoptimization::Action_none);
        return true;
      }
    }
  } else {
    decorators |= C2_MISMATCHED;
    ptr = basic_plus_adr(base, ConvL2X(offset));
  }

  if (is_store) {
    Node* value = argument(6);
    const Type* value_type = _gvn.type(value);
    if (!value_type->is_inlinetypeptr()) {
      value_type = Type::get_const_type(value_klass)->filter_speculative(value_type);
      Node* new_value = _gvn.transform(new CheckCastPPNode(control(), value, value_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing));
      new_value = InlineTypeNode::make_from_oop(this, new_value, value_klass);
      replace_in_map(value, new_value);
      value = new_value;
    }

    assert(value_type->inline_klass() == value_klass, "value is of type %s while valueType is %s", value_type->inline_klass()->name()->as_utf8(), value_klass->name()->as_utf8());
    if (layout == LayoutKind::REFERENCE) {
      const TypePtr* ptr_type = (decorators & C2_MISMATCHED) != 0 ? TypeRawPtr::BOTTOM : _gvn.type(ptr)->is_ptr();
      access_store_at(base, ptr, ptr_type, value, value_type, T_OBJECT, decorators);
    } else {
      bool atomic = LayoutKindHelper::is_atomic_flat(layout);
      bool null_free = !LayoutKindHelper::is_nullable_flat(layout);
      value->as_InlineType()->store_flat(this, base, ptr, atomic, immutable_memory, null_free, decorators);
    }

    return true;
  } else {
    decorators |= (C2_CONTROL_DEPENDENT_LOAD | C2_UNKNOWN_CONTROL_LOAD);
    InlineTypeNode* result;
    if (layout == LayoutKind::REFERENCE) {
      const TypePtr* ptr_type = (decorators & C2_MISMATCHED) != 0 ? TypeRawPtr::BOTTOM : _gvn.type(ptr)->is_ptr();
      Node* oop = access_load_at(base, ptr, ptr_type, Type::get_const_type(value_klass), T_OBJECT, decorators);
      result = InlineTypeNode::make_from_oop(this, oop, value_klass);
    } else {
      bool atomic = LayoutKindHelper::is_atomic_flat(layout);
      bool null_free = !LayoutKindHelper::is_nullable_flat(layout);
      result = InlineTypeNode::make_from_flat(this, value_klass, base, ptr, atomic, immutable_memory, null_free, decorators);
    }

    set_result(result);
    return true;
  }
}

//----------------------------inline_unsafe_load_store----------------------------
// This method serves a couple of different customers (depending on LoadStoreKind):
//
// LS_cmp_swap:
//
//   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
//   boolean compareAndSetInt(      Object o, long offset, int    expected, int    x);
//   boolean compareAndSetLong(     Object o, long offset, long   expected, long   x);
//
// LS_cmp_swap_weak:
//
//   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
//   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
//   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
//   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
//
//   boolean weakCompareAndSetInt(             Object o, long offset, int    expected, int    x);
//   boolean weakCompareAndSetIntPlain(        Object o, long offset, int    expected, int    x);
//   boolean weakCompareAndSetIntAcquire(      Object o, long offset, int    expected, int    x);
//   boolean weakCompareAndSetIntRelease(      Object o, long offset, int    expected, int    x);
//
//   boolean weakCompareAndSetLong(            Object o, long offset, long   expected, long   x);
//   boolean weakCompareAndSetLongPlain(       Object o, long offset, long   expected, long   x);
//   boolean weakCompareAndSetLongAcquire(     Object o, long offset, long   expected, long   x);
//   boolean weakCompareAndSetLongRelease(     Object o, long offset, long   expected, long   x);
//
// LS_cmp_exchange:
//
//   Object compareAndExchangeReferenceVolatile(Object o, long offset, Object expected, Object x);
//   Object compareAndExchangeReferenceAcquire( Object o, long offset, Object expected, Object x);
//   Object compareAndExchangeReferenceRelease( Object o, long offset, Object expected, Object x);
//
//   Object compareAndExchangeIntVolatile(      Object o, long offset, Object expected, Object x);
//   Object compareAndExchangeIntAcquire(       Object o, long offset, Object expected, Object x);
//   Object compareAndExchangeIntRelease(       Object o, long offset, Object expected, Object x);
//
//   Object compareAndExchangeLongVolatile(     Object o, long offset, Object expected, Object x);
//   Object compareAndExchangeLongAcquire(      Object o, long offset, Object expected, Object x);
//   Object compareAndExchangeLongRelease(      Object o, long offset, Object expected, Object x);
//
// LS_get_add:
//
//   int  getAndAddInt( Object o, long offset, int  delta)
//   long getAndAddLong(Object o, long offset, long delta)
//
// LS_get_set:
//
//   int    getAndSet(Object o, long offset, int    newValue)
//   long   getAndSet(Object o, long offset, long   newValue)
//   Object getAndSet(Object o, long offset, Object newValue)
//
bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadStoreKind kind, const AccessKind access_kind) {
  // This basic scheme here is the same as inline_unsafe_access, but
  // differs in enough details that combining them would make the code
  // overly confusing. (This is a true fact! I originally combined
  // them, but even I was confused by it!) As much code/comments as
  // possible are retained from inline_unsafe_access though to make
  // the correspondences clearer. - dl

  if (callee()->is_static()) return false; // caller must have the capability!

  DecoratorSet decorators = C2_UNSAFE_ACCESS;
  decorators |= mo_decorator_for_access_kind(access_kind);

#ifndef PRODUCT
  BasicType rtype;
  {
    ResourceMark rm;
    // Check the signatures.
    ciSignature* sig = callee()->signature();
    rtype = sig->return_type()->basic_type();
    switch(kind) {
      case LS_get_add:
      case LS_get_set: {
        // Check the signatures.
#ifdef ASSERT
        assert(rtype == type, "get and set must return the expected type");
        assert(sig->count() == 3, "get and set has 3 arguments");
        assert(sig->type_at(0)->basic_type() == T_OBJECT, "get and set base is object");
        assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
        assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
        assert(access_kind == Volatile, "mo is not passed to intrinsic nodes in current implementation");
#endif // ASSERT
        break;
      }
      case LS_cmp_swap:
      case LS_cmp_swap_weak: {
        // Check the signatures.
#ifdef ASSERT
        assert(rtype == T_BOOLEAN, "CAS must return boolean");
        assert(sig->count() == 4, "CAS has 4 arguments");
        assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
        assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
#endif // ASSERT
        break;
      }
      case LS_cmp_exchange: {
        // Check the signatures.
#ifdef ASSERT
        assert(rtype == type, "CAS must return the expected type");
        assert(sig->count() == 4, "CAS has 4 arguments");
        assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
        assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
#endif // ASSERT
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
#endif //PRODUCT

  C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".

  // Get arguments:
  Node* receiver = nullptr;
  Node* base     = nullptr;
  Node* offset   = nullptr;
  Node* oldval   = nullptr;
  Node* newval   = nullptr;
  switch(kind) {
    case LS_cmp_swap:
    case LS_cmp_swap_weak:
    case LS_cmp_exchange: {
      const bool two_slot_type = type2size[type] == 2;
      receiver = argument(0);                     // type: oop
      base     = argument(1);                     // type: oop
      offset   = argument(2);                     // type: long
      oldval   = argument(4);                     // type: oop, int, or long
      newval   = argument(two_slot_type ? 6 : 5); // type: oop, int, or long
      break;
    }
    case LS_get_add:
    case LS_get_set: {
      receiver = argument(0);                     // type: oop
      base     = argument(1);                     // type: oop
      offset   = argument(2);                     // type: long
      oldval   = nullptr;
      newval   = argument(4);                     // type: oop, int, or long
      break;
    }
    default:
      ShouldNotReachHere();
  }

  // Build field offset expression.
  // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
  // to be plain byte offsets, which are also the same as those accepted
  // by oopDesc::field_addr.
  assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
  // 32-bit machines ignore the high half of long offsets
  offset = ConvL2X(offset);
  // Save state and restore on bailout
  SavedState old_state(this);
  Node* adr = make_unsafe_address(base, offset, type, false);
  const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();

  Compile::AliasType* alias_type = C->alias_type(adr_type);
  BasicType bt = alias_type->basic_type();
  if (bt != T_ILLEGAL &&
      (is_reference_type(bt) != (type == T_OBJECT))) {
    // Don't intrinsify mismatched object accesses.
    return false;
  }

  old_state.discard();

  // For CAS, unlike inline_unsafe_access, there seems no point in
  // trying to refine types. Just use the coarse types here.
  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
  const Type* value_type = Type::get_const_basic_type(type);

  switch (kind) {
    case LS_get_set:
    case LS_cmp_exchange: {
      if (type == T_OBJECT) {
        const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
        if (tjp != nullptr) {
          value_type = tjp;
        }
      }
      break;
    }
    case LS_cmp_swap:
    case LS_cmp_swap_weak:
    case LS_get_add:
      break;
    default:
      ShouldNotReachHere();
  }

  // Null check receiver.
  receiver = null_check(receiver);
  if (stopped()) {
    return true;
  }

  int alias_idx = C->get_alias_index(adr_type);

  if (is_reference_type(type)) {
    decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;

    if (oldval != nullptr && oldval->is_InlineType()) {
      // Re-execute the unsafe access if allocation triggers deoptimization.
      PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);
      oldval = oldval->as_InlineType()->buffer(this)->get_oop();
    }
    if (newval != nullptr && newval->is_InlineType()) {
      // Re-execute the unsafe access if allocation triggers deoptimization.
      PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);
      newval = newval->as_InlineType()->buffer(this)->get_oop();
    }

    // Transformation of a value which could be null pointer (CastPP #null)
    // could be delayed during Parse (for example, in adjust_map_after_if()).
    // Execute transformation here to avoid barrier generation in such case.
    if (_gvn.type(newval) == TypePtr::NULL_PTR) {
      newval = _gvn.makecon(TypePtr::NULL_PTR);
    }

    if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
      // Refine the value to a null constant, when it is known to be null
      oldval = _gvn.makecon(TypePtr::NULL_PTR);
    }
  }

  Node* result = nullptr;
  switch (kind) {
    case LS_cmp_exchange: {
      result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
                                            oldval, newval, value_type, type, decorators);
      break;
    }
    case LS_cmp_swap_weak:
      decorators |= C2_WEAK_CMPXCHG;
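      // fall through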
    case LS_cmp_swap: {
      result = access_atomic_cmpxchg_bool_at(base, adr, adr_type, alias_idx,
                                             oldval, newval, value_type, type, decorators);
      break;
    }
    case LS_get_set: {
      result = access_atomic_xchg_at(base, adr, adr_type, alias_idx,
                                     newval, value_type, type, decorators);
      break;
    }
    case LS_get_add: {
      result = access_atomic_add_at(base, adr, adr_type, alias_idx,
                                    newval, value_type, type, decorators);
      break;
    }
    default:
      ShouldNotReachHere();
  }

  assert(type2size[result->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
  set_result(result);
  return true;
}

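//----------------------------inline_unsafe_fence------------------------------
// Compiles the Unsafe fence intrinsics (loadFence, storeFence,
// storeStoreFence, fullFence) into the corresponding memory barrier nodes.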
bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
  // Regardless of form, don't allow previous ld/st to move down,
  // then issue acquire, release, or volatile mem_bar.
  insert_mem_bar(Op_MemBarCPUOrder);
  switch(id) {
    case vmIntrinsics::_loadFence:
      insert_mem_bar(Op_LoadFence);
      return true;
    case vmIntrinsics::_storeFence:
      insert_mem_bar(Op_StoreFence);
      return true;
    case vmIntrinsics::_storeStoreFence:
      insert_mem_bar(Op_StoreStoreFence);
      return true;
    case vmIntrinsics::_fullFence:
      insert_mem_bar(Op_MemBarFull);
      return true;
    default:
      fatal_unexpected_iid(id);
      return false;
  }
}

// private native int arrayInstanceBaseOffset0(Object[] array);
bool LibraryCallKit::inline_arrayInstanceBaseOffset() {
  Node* array = argument(1);
  Node* klass_node = load_object_klass(array);

  jint layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  int layout_is_con = (layout_val == nullptr);

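  // The layout helper packs the header size and log2(element size) into a
  // single jint (see the _lh_* constants in klass.hpp). Sketch of the decode
  // performed below, e.g. for an array whose header is 16 bytes:
  //   header_size = (lh >> _lh_header_size_shift) & _lh_header_size_mask; // 16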
  Node* header_size = nullptr;
  if (layout_is_con) {
    int hsize = Klass::layout_helper_header_size(layout_con);
    header_size = intcon(hsize);
  } else {
    Node* hss = intcon(Klass::_lh_header_size_shift);
    Node* hsm = intcon(Klass::_lh_header_size_mask);
    header_size = _gvn.transform(new URShiftINode(layout_val, hss));
    header_size = _gvn.transform(new AndINode(header_size, hsm));
  }
  set_result(header_size);
  return true;
}

// private native int arrayInstanceIndexScale0(Object[] array);
bool LibraryCallKit::inline_arrayInstanceIndexScale() {
  Node* array = argument(1);
  Node* klass_node = load_object_klass(array);

  jint layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  int layout_is_con = (layout_val == nullptr);

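  // Analogous decode for the element size: the layout helper stores
  // log2(element size), so e.g. an int[] yields 1 << 2 == 4 below.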
  Node* element_size = nullptr;
  if (layout_is_con) {
    int log_element_size = Klass::layout_helper_log2_element_size(layout_con);
    int elem_size = 1 << log_element_size;
    element_size = intcon(elem_size);
  } else {
    Node* ess = intcon(Klass::_lh_log2_element_size_shift);
    Node* esm = intcon(Klass::_lh_log2_element_size_mask);
    Node* log_element_size = _gvn.transform(new URShiftINode(layout_val, ess));
    log_element_size = _gvn.transform(new AndINode(log_element_size, esm));
    element_size = _gvn.transform(new LShiftINode(intcon(1), log_element_size));
  }
  set_result(element_size);
  return true;
}

// private native int arrayLayout0(Object[] array);
bool LibraryCallKit::inline_arrayLayout() {
  RegionNode* region = new RegionNode(2);
  Node* phi = new PhiNode(region, TypeInt::POS);

  Node* array = argument(1);
  Node* klass_node = load_object_klass(array);
  generate_refArray_guard(klass_node, region);
  if (region->req() == 3) {
    phi->add_req(intcon((jint)LayoutKind::REFERENCE));
  }

  int layout_kind_offset = in_bytes(FlatArrayKlass::layout_kind_offset());
  Node* layout_kind_addr = basic_plus_adr(top(), klass_node, layout_kind_offset);
  Node* layout_kind = make_load(nullptr, layout_kind_addr, TypeInt::POS, T_INT, MemNode::unordered);

  region->init_req(1, control());
  phi->init_req(1, layout_kind);

  set_control(_gvn.transform(region));
  set_result(_gvn.transform(phi));
  return true;
}

// private native int[] getFieldMap0(Class<?> c);
//   int offset = c._klass._acmp_maps_offset;
//   return (int[])c.obj_field(offset);
bool LibraryCallKit::inline_getFieldMap() {
  Node* mirror = argument(1);
  Node* klass = load_klass_from_mirror(mirror, false, nullptr, 0);

  int field_map_offset_offset = in_bytes(InstanceKlass::acmp_maps_offset_offset());
  Node* field_map_offset_addr = basic_plus_adr(top(), klass, field_map_offset_offset);
  Node* field_map_offset = make_load(nullptr, field_map_offset_addr, TypeInt::INT, T_INT, MemNode::unordered);
  field_map_offset = _gvn.transform(ConvI2L(field_map_offset));

  Node* map_addr = basic_plus_adr(mirror, field_map_offset);
  const TypeAryPtr* val_type = TypeAryPtr::INTS->cast_to_ptr_type(TypePtr::NotNull)->with_offset(0);
  // TODO 8350865 Remove this
  val_type = val_type->cast_to_not_flat(true)->cast_to_not_null_free(true);
  Node* map = access_load_at(mirror, map_addr, TypeAryPtr::INTS, val_type, T_ARRAY, IN_HEAP | MO_UNORDERED);

  set_result(map);
  return true;
}

bool LibraryCallKit::inline_onspinwait() {
  insert_mem_bar(Op_OnSpinWait);
  return true;
}

bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
  if (!kls->is_Con()) {
    return true;
  }
  const TypeInstKlassPtr* klsptr = kls->bottom_type()->isa_instklassptr();
  if (klsptr == nullptr) {
    return true;
  }
  ciInstanceKlass* ik = klsptr->instance_klass();
  // don't need a guard for a klass that is already initialized
  return !ik->is_initialized();
}

//----------------------------inline_unsafe_writeback0-------------------------
// public native void Unsafe.writeback0(long address)
bool LibraryCallKit::inline_unsafe_writeback0() {
  if (!Matcher::has_match_rule(Op_CacheWB)) {
    return false;
  }
#ifndef PRODUCT
  assert(Matcher::has_match_rule(Op_CacheWBPreSync), "found match rule for CacheWB but not CacheWBPreSync");
  assert(Matcher::has_match_rule(Op_CacheWBPostSync), "found match rule for CacheWB but not CacheWBPostSync");
  ciSignature* sig = callee()->signature();
  assert(sig->type_at(0)->basic_type() == T_LONG, "Unsafe_writeback0 address is long!");
#endif
  null_check_receiver(); // null-check, then ignore
  Node* addr = argument(1);
  addr = new CastX2PNode(addr);
  addr = _gvn.transform(addr);
  Node* flush = new CacheWBNode(control(), memory(TypeRawPtr::BOTTOM), addr);
  flush = _gvn.transform(flush);
  set_memory(flush, TypeRawPtr::BOTTOM);
  return true;
}

//----------------------------inline_unsafe_writebackSync0---------------------
// public native void Unsafe.writebackPreSync0()
// public native void Unsafe.writebackPostSync0()
bool LibraryCallKit::inline_unsafe_writebackSync0(bool is_pre) {
  if (is_pre && !Matcher::has_match_rule(Op_CacheWBPreSync)) {
    return false;
  }
  if (!is_pre && !Matcher::has_match_rule(Op_CacheWBPostSync)) {
    return false;
  }
#ifndef PRODUCT
  assert(Matcher::has_match_rule(Op_CacheWB),
         (is_pre ? "found match rule for CacheWBPreSync but not CacheWB"
                 : "found match rule for CacheWBPostSync but not CacheWB"));
#endif
  null_check_receiver(); // null-check, then ignore
  Node* sync;
  if (is_pre) {
    sync = new CacheWBPreSyncNode(control(), memory(TypeRawPtr::BOTTOM));
  } else {
    sync = new CacheWBPostSyncNode(control(), memory(TypeRawPtr::BOTTOM));
  }
  sync = _gvn.transform(sync);
  set_memory(sync, TypeRawPtr::BOTTOM);
  return true;
}

//----------------------------inline_unsafe_allocate---------------------------
// public native Object Unsafe.allocateInstance(Class<?> cls);
bool LibraryCallKit::inline_unsafe_allocate() {

#if INCLUDE_JVMTI
  if (too_many_traps(Deoptimization::Reason_intrinsic)) {
    return false;
  }
#endif //INCLUDE_JVMTI

  if (callee()->is_static()) return false; // caller must have the capability!

  null_check_receiver(); // null-check, then ignore
  Node* cls = null_check(argument(1));
  if (stopped()) return true;

  Node* kls = load_klass_from_mirror(cls, false, nullptr, 0);
  kls = null_check(kls);
  if (stopped()) return true; // argument was like int.class

#if INCLUDE_JVMTI
  // Don't try to access new allocated obj in the intrinsic.
  // It causes performance issues even when the JVMTI event VmObjectAlloc is disabled.
  // Deoptimize and allocate in interpreter instead.
  Node* addr = makecon(TypeRawPtr::make((address) &JvmtiExport::_should_notify_object_alloc));
  Node* should_post_vm_object_alloc = make_load(this->control(), addr, TypeInt::INT, T_INT, MemNode::unordered);
  Node* chk = _gvn.transform(new CmpINode(should_post_vm_object_alloc, intcon(0)));
  Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq));
  {
    BuildCutout unless(this, tst, PROB_MAX);
    uncommon_trap(Deoptimization::Reason_intrinsic,
                  Deoptimization::Action_make_not_entrant);
  }
  if (stopped()) {
    return true;
  }
#endif //INCLUDE_JVMTI

  Node* test = nullptr;
  if (LibraryCallKit::klass_needs_init_guard(kls)) {
    // Note: The argument might still be an illegal value like
    // Serializable.class or Object[].class. The runtime will handle it.
    // But we must make an explicit check for initialization.
    Node* insp = off_heap_plus_addr(kls, in_bytes(InstanceKlass::init_state_offset()));
    // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
    // can generate code to load it as unsigned byte.
    Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
    Node* bits = intcon(InstanceKlass::fully_initialized);
    test = _gvn.transform(new SubINode(inst, bits));
    // The 'test' is non-zero if we need to take a slow path.
  }
  Node* obj = new_instance(kls, test);
  set_result(obj);
  return true;
}

//------------------------inline_native_time_funcs--------------
// inline code for System.currentTimeMillis() and System.nanoTime()
// these have the same type and signature
bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
  const TypeFunc* tf = OptoRuntime::void_long_Type();
  const TypePtr* no_memory_effects = nullptr;
  Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
  Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
#ifdef ASSERT
  Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
  assert(value_top == top(), "second value must be top");
#endif
  set_result(value);
  return true;
}

//--------------------inline_native_vthread_start_transition--------------------
// inline void startTransition(boolean is_mount);
// inline void startFinalTransition();
// Pseudocode of implementation:
//
//    java_lang_Thread::set_is_in_vthread_transition(vthread, true);
//    carrier->set_is_in_vthread_transition(true);
//    OrderAccess::storeload();
//    int disable_requests = java_lang_Thread::vthread_transition_disable_count(vthread)
//                           + global_vthread_transition_disable_count();
//    if (disable_requests > 0) {
//      slow path: runtime call
//    }
bool LibraryCallKit::inline_native_vthread_start_transition(address funcAddr, const char* funcName, bool is_final_transition) {
  Node* vt_oop = must_be_not_null(argument(0), true); // VirtualThread this argument
  IdealKit ideal(this);

  Node* thread = ideal.thread();
  Node* jt_addr = off_heap_plus_addr(thread, in_bytes(JavaThread::is_in_vthread_transition_offset()));
  Node* vt_addr = basic_plus_adr(vt_oop, java_lang_Thread::is_in_vthread_transition_offset());
  access_store_at(nullptr, jt_addr, _gvn.type(jt_addr)->is_ptr(), ideal.ConI(1), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
  access_store_at(nullptr, vt_addr, _gvn.type(vt_addr)->is_ptr(), ideal.ConI(1), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
  insert_mem_bar(Op_MemBarStoreLoad);
  ideal.sync_kit(this);

  Node* global_disable_addr = makecon(TypeRawPtr::make((address)MountUnmountDisabler::global_vthread_transition_disable_count_address()));
  Node* global_disable = ideal.load(ideal.ctrl(), global_disable_addr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, true /*require_atomic_access*/);
  Node* vt_disable_addr = basic_plus_adr(vt_oop, java_lang_Thread::vthread_transition_disable_count_offset());
  const TypePtr* vt_disable_addr_t = _gvn.type(vt_disable_addr)->is_ptr();
  Node* vt_disable = ideal.load(ideal.ctrl(), vt_disable_addr, TypeInt::INT, T_INT, C->get_alias_index(vt_disable_addr_t), true /*require_atomic_access*/);
  Node* disabled = _gvn.transform(new AddINode(global_disable, vt_disable));

  ideal.if_then(disabled, BoolTest::ne, ideal.ConI(0)); {
    sync_kit(ideal);
    Node* is_mount = is_final_transition ? ideal.ConI(0) : argument(1);
    const TypeFunc* tf = OptoRuntime::vthread_transition_Type();
    make_runtime_call(RC_NO_LEAF, tf, funcAddr, funcName, TypePtr::BOTTOM, vt_oop, is_mount);
    ideal.sync_kit(this);
  }
  ideal.end_if();

  final_sync(ideal);
  return true;
}

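// Pseudocode sketch of the end-transition (reconstructed from the IR built
// below; it mirrors the start-transition pseudocode above):
//
//    if (notify_jvmti_events) {
//      slow path: runtime call
//    } else {
//      java_lang_Thread::set_is_in_vthread_transition(vthread, false);
//      carrier->set_is_in_vthread_transition(false);
//    }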
bool LibraryCallKit::inline_native_vthread_end_transition(address funcAddr, const char* funcName, bool is_first_transition) {
  Node* vt_oop = must_be_not_null(argument(0), true); // VirtualThread this argument
  IdealKit ideal(this);

  Node* _notify_jvmti_addr = makecon(TypeRawPtr::make((address)MountUnmountDisabler::notify_jvmti_events_address()));
  Node* _notify_jvmti = ideal.load(ideal.ctrl(), _notify_jvmti_addr, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw);

  ideal.if_then(_notify_jvmti, BoolTest::eq, ideal.ConI(1)); {
    sync_kit(ideal);
    Node* is_mount = is_first_transition ? ideal.ConI(1) : argument(1);
    const TypeFunc* tf = OptoRuntime::vthread_transition_Type();
    make_runtime_call(RC_NO_LEAF, tf, funcAddr, funcName, TypePtr::BOTTOM, vt_oop, is_mount);
    ideal.sync_kit(this);
  } ideal.else_(); {
    Node* thread = ideal.thread();
    Node* jt_addr = off_heap_plus_addr(thread, in_bytes(JavaThread::is_in_vthread_transition_offset()));
    Node* vt_addr = basic_plus_adr(vt_oop, java_lang_Thread::is_in_vthread_transition_offset());

    sync_kit(ideal);
    access_store_at(nullptr, jt_addr, _gvn.type(jt_addr)->is_ptr(), ideal.ConI(0), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
    access_store_at(nullptr, vt_addr, _gvn.type(vt_addr)->is_ptr(), ideal.ConI(0), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
    ideal.sync_kit(this);
  } ideal.end_if();

  final_sync(ideal);
  return true;
}

#if INCLUDE_JVMTI

// Always update the is_disable_suspend bit.
bool LibraryCallKit::inline_native_notify_jvmti_sync() {
  if (!DoJVMTIVirtualThreadTransitions) {
    return true;
  }
  IdealKit ideal(this);

  {
    // unconditionally update the is_disable_suspend bit in current JavaThread
    Node* thread = ideal.thread();
    Node* arg = argument(0); // argument for notification
    Node* addr = off_heap_plus_addr(thread, in_bytes(JavaThread::is_disable_suspend_offset()));
    const TypePtr* addr_type = _gvn.type(addr)->isa_ptr();

    sync_kit(ideal);
    access_store_at(nullptr, addr, addr_type, arg, _gvn.type(arg), T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
    ideal.sync_kit(this);
  }
  final_sync(ideal);

  return true;
}

#endif // INCLUDE_JVMTI

#ifdef JFR_HAVE_INTRINSICS

/**
 * if oop->klass != null
 *   // normal class
 *   epoch = _epoch_state ? 2 : 1
 *   if (oop->klass->trace_id & ((epoch << META_SHIFT) | epoch)) != epoch {
 *     ... // enter slow path when the klass is first recorded or the epoch of JFR shifts
 *   }
 *   id = oop->klass->trace_id >> TRACE_ID_SHIFT // normal class path
 * else
 *   // primitive class
 *   if oop->array_klass != null
 *     id = (oop->array_klass->trace_id >> TRACE_ID_SHIFT) + 1 // primitive class path
 *   else
 *     id = LAST_TYPE_ID + 1 // void class path
 * if (!signaled)
 *   signaled = true
 */
bool LibraryCallKit::inline_native_classID() {
  Node* cls = argument(0);

  IdealKit ideal(this);
#define __ ideal.
  IdealVariable result(ideal); __ declarations_done();
  Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(),
                                                 basic_plus_adr(cls, java_lang_Class::klass_offset()),
                                                 TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));

  __ if_then(kls, BoolTest::ne, null()); {
    Node* kls_trace_id_addr = basic_plus_adr(kls, in_bytes(KLASS_TRACE_ID_OFFSET));
    Node* kls_trace_id_raw = ideal.load(ideal.ctrl(), kls_trace_id_addr, TypeLong::LONG, T_LONG, Compile::AliasIdxRaw);

    Node* epoch_address = makecon(TypeRawPtr::make(JfrIntrinsicSupport::epoch_address()));
    Node* epoch = ideal.load(ideal.ctrl(), epoch_address, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw);
    epoch = _gvn.transform(new LShiftLNode(longcon(1), epoch));
    Node* mask = _gvn.transform(new LShiftLNode(epoch, intcon(META_SHIFT)));
    mask = _gvn.transform(new OrLNode(mask, epoch));
    Node* kls_trace_id_raw_and_mask = _gvn.transform(new AndLNode(kls_trace_id_raw, mask));

    float unlikely = PROB_UNLIKELY(0.999);
    __ if_then(kls_trace_id_raw_and_mask, BoolTest::ne, epoch, unlikely); {
      sync_kit(ideal);
      make_runtime_call(RC_LEAF,
                        OptoRuntime::class_id_load_barrier_Type(),
                        CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::load_barrier),
                        "class id load barrier",
                        TypePtr::BOTTOM,
                        kls);
      ideal.sync_kit(this);
    } __ end_if();

    ideal.set(result, _gvn.transform(new URShiftLNode(kls_trace_id_raw, ideal.ConI(TRACE_ID_SHIFT))));
  } __ else_(); {
    Node* array_kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(),
                                                         basic_plus_adr(cls, java_lang_Class::array_klass_offset()),
                                                         TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
    __ if_then(array_kls, BoolTest::ne, null()); {
      Node* array_kls_trace_id_addr = basic_plus_adr(array_kls, in_bytes(KLASS_TRACE_ID_OFFSET));
      Node* array_kls_trace_id_raw = ideal.load(ideal.ctrl(), array_kls_trace_id_addr, TypeLong::LONG, T_LONG, Compile::AliasIdxRaw);
      Node* array_kls_trace_id = _gvn.transform(new URShiftLNode(array_kls_trace_id_raw, ideal.ConI(TRACE_ID_SHIFT)));
      ideal.set(result, _gvn.transform(new AddLNode(array_kls_trace_id, longcon(1))));
    } __ else_(); {
      // void class case
      ideal.set(result, longcon(LAST_TYPE_ID + 1));
    } __ end_if();

    Node* signaled_flag_address = makecon(TypeRawPtr::make(JfrIntrinsicSupport::signal_address()));
    Node* signaled = ideal.load(ideal.ctrl(), signaled_flag_address, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw, true, MemNode::acquire);
    __ if_then(signaled, BoolTest::ne, ideal.ConI(1)); {
      ideal.store(ideal.ctrl(), signaled_flag_address, ideal.ConI(1), T_BOOLEAN, Compile::AliasIdxRaw, MemNode::release, true);
    } __ end_if();
  } __ end_if();

  final_sync(ideal);
  set_result(ideal.value(result));
#undef __
  return true;
}

//------------------------inline_native_jvm_commit------------------
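// Pseudocode sketch of the intrinsic below (reconstructed from the IR it
// builds; field and helper names are illustrative):
//
//   if (tl->notified) {
//     tl->notified = false;
//     return buffer->pos;          // caller resets the event writer
//   }
//   buffer->pos = next_position;   // commit the event
//   if (buffer->flags & LEASE) {
//     return_lease();              // runtime call, can safepoint
//     return 0;
//   }
//   return next_position;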
bool LibraryCallKit::inline_native_jvm_commit() {
  enum { _true_path = 1, _false_path = 2, PATH_LIMIT };

  // Save input memory and i_o state.
  Node* input_memory_state = reset_memory();
  set_all_memory(input_memory_state);
  Node* input_io_state = i_o();

  // TLS.
  Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
  // Jfr java buffer.
  Node* java_buffer_offset = _gvn.transform(AddPNode::make_off_heap(tls_ptr, MakeConX(in_bytes(JAVA_BUFFER_OFFSET_JFR))));
  Node* java_buffer = _gvn.transform(new LoadPNode(control(), input_memory_state, java_buffer_offset, TypePtr::BOTTOM, TypeRawPtr::NOTNULL, MemNode::unordered));
  Node* java_buffer_pos_offset = _gvn.transform(AddPNode::make_off_heap(java_buffer, MakeConX(in_bytes(JFR_BUFFER_POS_OFFSET))));

  // Load the current value of the notified field in the JfrThreadLocal.
  Node* notified_offset = off_heap_plus_addr(tls_ptr, in_bytes(NOTIFY_OFFSET_JFR));
  Node* notified = make_load(control(), notified_offset, TypeInt::BOOL, T_BOOLEAN, MemNode::unordered);

  // Test for notification.
  Node* notified_cmp = _gvn.transform(new CmpINode(notified, _gvn.intcon(1)));
  Node* test_notified = _gvn.transform(new BoolNode(notified_cmp, BoolTest::eq));
  IfNode* iff_notified = create_and_map_if(control(), test_notified, PROB_MIN, COUNT_UNKNOWN);

  // True branch, is notified.
  Node* is_notified = _gvn.transform(new IfTrueNode(iff_notified));
  set_control(is_notified);

  // Reset notified state.
  store_to_memory(control(), notified_offset, _gvn.intcon(0), T_BOOLEAN, MemNode::unordered);
  Node* notified_reset_memory = reset_memory();

  // Iff notified, the return address of the commit method is the current position of the backing java buffer. This is used to reset the event writer.
  Node* current_pos_X = _gvn.transform(new LoadXNode(control(), input_memory_state, java_buffer_pos_offset, TypeRawPtr::NOTNULL, TypeX_X, MemNode::unordered));
  // Convert the machine-word to a long.
  Node* current_pos = ConvX2L(current_pos_X);

  // False branch, not notified.
  Node* not_notified = _gvn.transform(new IfFalseNode(iff_notified));
  set_control(not_notified);
  set_all_memory(input_memory_state);

  // Arg is the next position as a long.
  Node* arg = argument(0);
  // Convert long to machine-word.
  Node* next_pos_X = ConvL2X(arg);

  // Store the next_position to the underlying jfr java buffer.
  store_to_memory(control(), java_buffer_pos_offset, next_pos_X, LP64_ONLY(T_LONG) NOT_LP64(T_INT), MemNode::release);

  Node* commit_memory = reset_memory();
  set_all_memory(commit_memory);

  // Now load the flags from off the java buffer and decide if the buffer is a lease. If so, it needs to be returned post-commit.
  Node* java_buffer_flags_offset = _gvn.transform(AddPNode::make_off_heap(java_buffer, MakeConX(in_bytes(JFR_BUFFER_FLAGS_OFFSET))));
  Node* flags = make_load(control(), java_buffer_flags_offset, TypeInt::UBYTE, T_BYTE, MemNode::unordered);
  Node* lease_constant = _gvn.intcon(4);

  // And flags with lease constant.
  Node* lease = _gvn.transform(new AndINode(flags, lease_constant));

  // Branch on lease to conditionalize returning the leased java buffer.
  Node* lease_cmp = _gvn.transform(new CmpINode(lease, lease_constant));
  Node* test_lease = _gvn.transform(new BoolNode(lease_cmp, BoolTest::eq));
  IfNode* iff_lease = create_and_map_if(control(), test_lease, PROB_MIN, COUNT_UNKNOWN);

  // False branch, not a lease.
  Node* not_lease = _gvn.transform(new IfFalseNode(iff_lease));

  // True branch, is lease.
  Node* is_lease = _gvn.transform(new IfTrueNode(iff_lease));
  set_control(is_lease);

  // Make a runtime call, which can safepoint, to return the leased buffer. This updates both the JfrThreadLocal and the Java event writer oop.
  Node* call_return_lease = make_runtime_call(RC_NO_LEAF,
                                              OptoRuntime::void_void_Type(),
                                              SharedRuntime::jfr_return_lease(),
                                              "return_lease", TypePtr::BOTTOM);
  Node* call_return_lease_control = _gvn.transform(new ProjNode(call_return_lease, TypeFunc::Control));

  RegionNode* lease_compare_rgn = new RegionNode(PATH_LIMIT);
  record_for_igvn(lease_compare_rgn);
  PhiNode* lease_compare_mem = new PhiNode(lease_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
  record_for_igvn(lease_compare_mem);
  PhiNode* lease_compare_io = new PhiNode(lease_compare_rgn, Type::ABIO);
  record_for_igvn(lease_compare_io);
  PhiNode* lease_result_value = new PhiNode(lease_compare_rgn, TypeLong::LONG);
  record_for_igvn(lease_result_value);

  // Update control and phi nodes.
  lease_compare_rgn->init_req(_true_path, call_return_lease_control);
  lease_compare_rgn->init_req(_false_path, not_lease);

  lease_compare_mem->init_req(_true_path, reset_memory());
  lease_compare_mem->init_req(_false_path, commit_memory);

  lease_compare_io->init_req(_true_path, i_o());
  lease_compare_io->init_req(_false_path, input_io_state);

  lease_result_value->init_req(_true_path, _gvn.longcon(0)); // if the lease was returned, return 0L.
  lease_result_value->init_req(_false_path, arg); // if not lease, return new updated position.

  RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
  PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
  PhiNode* result_io = new PhiNode(result_rgn, Type::ABIO);
  PhiNode* result_value = new PhiNode(result_rgn, TypeLong::LONG);

  // Update control and phi nodes.
  result_rgn->init_req(_true_path, is_notified);
  result_rgn->init_req(_false_path, _gvn.transform(lease_compare_rgn));

  result_mem->init_req(_true_path, notified_reset_memory);
  result_mem->init_req(_false_path, _gvn.transform(lease_compare_mem));

  result_io->init_req(_true_path, input_io_state);
  result_io->init_req(_false_path, _gvn.transform(lease_compare_io));

  result_value->init_req(_true_path, current_pos);
  result_value->init_req(_false_path, _gvn.transform(lease_result_value));

  // Set output state.
  set_control(_gvn.transform(result_rgn));
  set_all_memory(_gvn.transform(result_mem));
  set_i_o(_gvn.transform(result_io));
  set_result(result_rgn, result_value);
  return true;
}

/*
 * The intrinsic is a model of this pseudo-code:
 *
 * JfrThreadLocal* const tl = Thread::jfr_thread_local()
 * jobject h_event_writer = tl->java_event_writer();
 * if (h_event_writer == nullptr) {
 *   return nullptr;
 * }
 * oop threadObj = Thread::threadObj();
 * oop vthread = java_lang_Thread::vthread(threadObj);
 * traceid tid;
 * bool pinVirtualThread;
 * bool excluded;
 * if (vthread != threadObj) {  // i.e. current thread is virtual
 *   tid = java_lang_Thread::tid(vthread);
 *   u2 vthread_epoch_raw = java_lang_Thread::jfr_epoch(vthread);
 *   pinVirtualThread = VMContinuations;
 *   excluded = vthread_epoch_raw & excluded_mask;
 *   if (!excluded) {
 *     traceid current_epoch = JfrTraceIdEpoch::current_generation();
 *     u2 vthread_epoch = vthread_epoch_raw & epoch_mask;
 *     if (vthread_epoch != current_epoch) {
 *       write_checkpoint();
 *     }
 *   }
 * } else {
 *   tid = java_lang_Thread::tid(threadObj);
 *   u2 thread_epoch_raw = java_lang_Thread::jfr_epoch(threadObj);
 *   pinVirtualThread = false;
 *   excluded = thread_epoch_raw & excluded_mask;
 * }
 * oop event_writer = JNIHandles::resolve_non_null(h_event_writer);
 * traceid tid_in_event_writer = getField(event_writer, "threadID");
 * if (tid_in_event_writer != tid) {
 *   setField(event_writer, "pinVirtualThread", pinVirtualThread);
 *   setField(event_writer, "excluded", excluded);
 *   setField(event_writer, "threadID", tid);
 * }
 * return event_writer
 */
bool LibraryCallKit::inline_native_getEventWriter() {
  enum { _true_path = 1, _false_path = 2, PATH_LIMIT };

  // Save input memory and i_o state.
  Node* input_memory_state = reset_memory();
  set_all_memory(input_memory_state);
  Node* input_io_state = i_o();

  // The most significant bit of the u2 is used to denote thread exclusion
  Node* excluded_shift = _gvn.intcon(15);
  Node* excluded_mask = _gvn.intcon(1 << 15);
  // The epoch generation is the range [1-32767]
  Node* epoch_mask = _gvn.intcon(32767);

  // TLS
  Node* tls_ptr = _gvn.transform(new ThreadLocalNode());

  // Load the address of java event writer jobject handle from the jfr_thread_local structure.
  Node* jobj_ptr = off_heap_plus_addr(tls_ptr, in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR));

  // Load the eventwriter jobject handle.
  Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);

  // Null check the jobject handle.
  Node* jobj_cmp_null = _gvn.transform(new CmpPNode(jobj, null()));
  Node* test_jobj_not_equal_null = _gvn.transform(new BoolNode(jobj_cmp_null, BoolTest::ne));
  IfNode* iff_jobj_not_equal_null = create_and_map_if(control(), test_jobj_not_equal_null, PROB_MAX, COUNT_UNKNOWN);

  // False path, jobj is null.
  Node* jobj_is_null = _gvn.transform(new IfFalseNode(iff_jobj_not_equal_null));

  // True path, jobj is not null.
  Node* jobj_is_not_null = _gvn.transform(new IfTrueNode(iff_jobj_not_equal_null));

  set_control(jobj_is_not_null);

  // Load the threadObj for the CarrierThread.
  Node* threadObj = generate_current_thread(tls_ptr);

  // Load the vthread.
  Node* vthread = generate_virtual_thread(tls_ptr);

  // If vthread != threadObj, this is a virtual thread.
  Node* vthread_cmp_threadObj = _gvn.transform(new CmpPNode(vthread, threadObj));
  Node* test_vthread_not_equal_threadObj = _gvn.transform(new BoolNode(vthread_cmp_threadObj, BoolTest::ne));
  IfNode* iff_vthread_not_equal_threadObj =
    create_and_map_if(jobj_is_not_null, test_vthread_not_equal_threadObj, PROB_FAIR, COUNT_UNKNOWN);

  // False branch, fallback to threadObj.
  Node* vthread_equal_threadObj = _gvn.transform(new IfFalseNode(iff_vthread_not_equal_threadObj));
  set_control(vthread_equal_threadObj);

  // Load the tid field from the threadObj.
3738 Node* thread_obj_tid = load_field_from_object(threadObj, "tid", "J");
3739
3740 // Load the raw epoch value from the threadObj.
3741 Node* threadObj_epoch_offset = basic_plus_adr(threadObj, java_lang_Thread::jfr_epoch_offset());
3742 Node* threadObj_epoch_raw = access_load_at(threadObj, threadObj_epoch_offset,
3743 _gvn.type(threadObj_epoch_offset)->isa_ptr(),
3744 TypeInt::CHAR, T_CHAR,
3745 IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD);
3746
3747 // Mask off the excluded information from the epoch.
3748 Node * threadObj_is_excluded = _gvn.transform(new AndINode(threadObj_epoch_raw, excluded_mask));
3749
3750 // True branch, this is a virtual thread.
3751 Node* vthread_not_equal_threadObj = _gvn.transform(new IfTrueNode(iff_vthread_not_equal_threadObj));
3752 set_control(vthread_not_equal_threadObj);
3753
3754 // Load the tid field from the vthread object.
3755 Node* vthread_tid = load_field_from_object(vthread, "tid", "J");
3756
3757 // Continuation support determines if a virtual thread should be pinned.
3758 Node* global_addr = makecon(TypeRawPtr::make((address)&VMContinuations));
3759 Node* continuation_support = make_load(control(), global_addr, TypeInt::BOOL, T_BOOLEAN, MemNode::unordered);
3760
3761 // Load the raw epoch value from the vthread.
3762 Node* vthread_epoch_offset = basic_plus_adr(vthread, java_lang_Thread::jfr_epoch_offset());
3763 Node* vthread_epoch_raw = access_load_at(vthread, vthread_epoch_offset, _gvn.type(vthread_epoch_offset)->is_ptr(),
3764 TypeInt::CHAR, T_CHAR,
3765 IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD);
3766
3767 // Mask off the excluded information from the epoch.
3768 Node * vthread_is_excluded = _gvn.transform(new AndINode(vthread_epoch_raw, excluded_mask));
3769
3770 // Branch on excluded to conditionalize updating the epoch for the virtual thread.
3771 Node* is_excluded_cmp = _gvn.transform(new CmpINode(vthread_is_excluded, excluded_mask));
3772 Node* test_not_excluded = _gvn.transform(new BoolNode(is_excluded_cmp, BoolTest::ne));
3773 IfNode* iff_not_excluded = create_and_map_if(control(), test_not_excluded, PROB_MAX, COUNT_UNKNOWN);
3774
3775 // False branch, vthread is excluded, no need to write epoch info.
3776 Node* excluded = _gvn.transform(new IfFalseNode(iff_not_excluded));
3777
3778 // True branch, vthread is included, update epoch info.
3779 Node* included = _gvn.transform(new IfTrueNode(iff_not_excluded));
3780 set_control(included);
3781
3782 // Get epoch value.
3783 Node* epoch = _gvn.transform(new AndINode(vthread_epoch_raw, epoch_mask));
3784
3785 // Load the current epoch generation. The value is unsigned 16-bit, so we type it as T_CHAR.
3786 Node* epoch_generation_address = makecon(TypeRawPtr::make(JfrIntrinsicSupport::epoch_generation_address()));
3787 Node* current_epoch_generation = make_load(control(), epoch_generation_address, TypeInt::CHAR, T_CHAR, MemNode::unordered);
3788
3789 // Compare the epoch in the vthread to the current epoch generation.
3790 Node* const epoch_cmp = _gvn.transform(new CmpUNode(current_epoch_generation, epoch));
3791 Node* test_epoch_not_equal = _gvn.transform(new BoolNode(epoch_cmp, BoolTest::ne));
3792 IfNode* iff_epoch_not_equal = create_and_map_if(control(), test_epoch_not_equal, PROB_FAIR, COUNT_UNKNOWN);
3793
3794 // False path, epoch is equal, checkpoint information is valid.
3795 Node* epoch_is_equal = _gvn.transform(new IfFalseNode(iff_epoch_not_equal));
3796
3797 // True path, epoch is not equal, write a checkpoint for the vthread.
3798 Node* epoch_is_not_equal = _gvn.transform(new IfTrueNode(iff_epoch_not_equal));
3799
3800 set_control(epoch_is_not_equal);
3801
3802 // Make a runtime call, which can safepoint, to write a checkpoint for the vthread for this epoch.
3803 // The call also updates the native thread local thread id and the vthread with the current epoch.
3804 Node* call_write_checkpoint = make_runtime_call(RC_NO_LEAF,
3805 OptoRuntime::jfr_write_checkpoint_Type(),
3806 SharedRuntime::jfr_write_checkpoint(),
3807 "write_checkpoint", TypePtr::BOTTOM);
3808 Node* call_write_checkpoint_control = _gvn.transform(new ProjNode(call_write_checkpoint, TypeFunc::Control));
3809
3810 // vthread epoch != current epoch
3811 RegionNode* epoch_compare_rgn = new RegionNode(PATH_LIMIT);
3812 record_for_igvn(epoch_compare_rgn);
3813 PhiNode* epoch_compare_mem = new PhiNode(epoch_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
3814 record_for_igvn(epoch_compare_mem);
3815 PhiNode* epoch_compare_io = new PhiNode(epoch_compare_rgn, Type::ABIO);
3816 record_for_igvn(epoch_compare_io);
3817
3818 // Update control and phi nodes.
3819 epoch_compare_rgn->init_req(_true_path, call_write_checkpoint_control);
3820 epoch_compare_rgn->init_req(_false_path, epoch_is_equal);
3821 epoch_compare_mem->init_req(_true_path, reset_memory());
3822 epoch_compare_mem->init_req(_false_path, input_memory_state);
3823 epoch_compare_io->init_req(_true_path, i_o());
3824 epoch_compare_io->init_req(_false_path, input_io_state);
3825
3826 // excluded != true
3827 RegionNode* exclude_compare_rgn = new RegionNode(PATH_LIMIT);
3828 record_for_igvn(exclude_compare_rgn);
3829 PhiNode* exclude_compare_mem = new PhiNode(exclude_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
3830 record_for_igvn(exclude_compare_mem);
3831 PhiNode* exclude_compare_io = new PhiNode(exclude_compare_rgn, Type::ABIO);
3832 record_for_igvn(exclude_compare_io);
3833
3834 // Update control and phi nodes.
3835 exclude_compare_rgn->init_req(_true_path, _gvn.transform(epoch_compare_rgn));
3836 exclude_compare_rgn->init_req(_false_path, excluded);
3837 exclude_compare_mem->init_req(_true_path, _gvn.transform(epoch_compare_mem));
3838 exclude_compare_mem->init_req(_false_path, input_memory_state);
3839 exclude_compare_io->init_req(_true_path, _gvn.transform(epoch_compare_io));
3840 exclude_compare_io->init_req(_false_path, input_io_state);
3841
3842 // vthread != threadObj
3843 RegionNode* vthread_compare_rgn = new RegionNode(PATH_LIMIT);
3844 record_for_igvn(vthread_compare_rgn);
  PhiNode* vthread_compare_mem = new PhiNode(vthread_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
  record_for_igvn(vthread_compare_mem);
3846 PhiNode* vthread_compare_io = new PhiNode(vthread_compare_rgn, Type::ABIO);
3847 record_for_igvn(vthread_compare_io);
3848 PhiNode* tid = new PhiNode(vthread_compare_rgn, TypeLong::LONG);
3849 record_for_igvn(tid);
3850 PhiNode* exclusion = new PhiNode(vthread_compare_rgn, TypeInt::CHAR);
3851 record_for_igvn(exclusion);
3852 PhiNode* pinVirtualThread = new PhiNode(vthread_compare_rgn, TypeInt::BOOL);
3853 record_for_igvn(pinVirtualThread);
3854
3855 // Update control and phi nodes.
3856 vthread_compare_rgn->init_req(_true_path, _gvn.transform(exclude_compare_rgn));
3857 vthread_compare_rgn->init_req(_false_path, vthread_equal_threadObj);
3858 vthread_compare_mem->init_req(_true_path, _gvn.transform(exclude_compare_mem));
3859 vthread_compare_mem->init_req(_false_path, input_memory_state);
3860 vthread_compare_io->init_req(_true_path, _gvn.transform(exclude_compare_io));
3861 vthread_compare_io->init_req(_false_path, input_io_state);
3862 tid->init_req(_true_path, vthread_tid);
3863 tid->init_req(_false_path, thread_obj_tid);
3864 exclusion->init_req(_true_path, vthread_is_excluded);
3865 exclusion->init_req(_false_path, threadObj_is_excluded);
3866 pinVirtualThread->init_req(_true_path, continuation_support);
3867 pinVirtualThread->init_req(_false_path, _gvn.intcon(0));
3868
3869 // Update branch state.
3870 set_control(_gvn.transform(vthread_compare_rgn));
3871 set_all_memory(_gvn.transform(vthread_compare_mem));
3872 set_i_o(_gvn.transform(vthread_compare_io));
3873
3874 // Load the event writer oop by dereferencing the jobject handle.
3875 ciKlass* klass_EventWriter = env()->find_system_klass(ciSymbol::make("jdk/jfr/internal/event/EventWriter"));
3876 assert(klass_EventWriter->is_loaded(), "invariant");
3877 ciInstanceKlass* const instklass_EventWriter = klass_EventWriter->as_instance_klass();
3878 const TypeKlassPtr* const aklass = TypeKlassPtr::make(instklass_EventWriter);
3879 const TypeOopPtr* const xtype = aklass->as_instance_type();
3880 Node* jobj_untagged = _gvn.transform(AddPNode::make_off_heap(jobj, _gvn.MakeConX(-JNIHandles::TypeTag::global)));
3881 Node* event_writer = access_load(jobj_untagged, xtype, T_OBJECT, IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
3882
3883 // Load the current thread id from the event writer object.
3884 Node* const event_writer_tid = load_field_from_object(event_writer, "threadID", "J");
3885 // Get the field offset to, conditionally, store an updated tid value later.
3886 Node* const event_writer_tid_field = field_address_from_object(event_writer, "threadID", "J", false);
3887 // Get the field offset to, conditionally, store an updated exclusion value later.
3888 Node* const event_writer_excluded_field = field_address_from_object(event_writer, "excluded", "Z", false);
3889 // Get the field offset to, conditionally, store an updated pinVirtualThread value later.
3890 Node* const event_writer_pin_field = field_address_from_object(event_writer, "pinVirtualThread", "Z", false);
3891
3892 RegionNode* event_writer_tid_compare_rgn = new RegionNode(PATH_LIMIT);
3893 record_for_igvn(event_writer_tid_compare_rgn);
3894 PhiNode* event_writer_tid_compare_mem = new PhiNode(event_writer_tid_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
3895 record_for_igvn(event_writer_tid_compare_mem);
3896 PhiNode* event_writer_tid_compare_io = new PhiNode(event_writer_tid_compare_rgn, Type::ABIO);
3897 record_for_igvn(event_writer_tid_compare_io);
3898
3899 // Compare the current tid from the thread object to what is currently stored in the event writer object.
3900 Node* const tid_cmp = _gvn.transform(new CmpLNode(event_writer_tid, _gvn.transform(tid)));
3901 Node* test_tid_not_equal = _gvn.transform(new BoolNode(tid_cmp, BoolTest::ne));
3902 IfNode* iff_tid_not_equal = create_and_map_if(_gvn.transform(vthread_compare_rgn), test_tid_not_equal, PROB_FAIR, COUNT_UNKNOWN);
3903
3904 // False path, tids are the same.
3905 Node* tid_is_equal = _gvn.transform(new IfFalseNode(iff_tid_not_equal));
3906
3907 // True path, tid is not equal, need to update the tid in the event writer.
3908 Node* tid_is_not_equal = _gvn.transform(new IfTrueNode(iff_tid_not_equal));
3909 record_for_igvn(tid_is_not_equal);
3910
3911 // Store the pin state to the event writer.
3912 store_to_memory(tid_is_not_equal, event_writer_pin_field, _gvn.transform(pinVirtualThread), T_BOOLEAN, MemNode::unordered);
3913
3914 // Store the exclusion state to the event writer.
3915 Node* excluded_bool = _gvn.transform(new URShiftINode(_gvn.transform(exclusion), excluded_shift));
3916 store_to_memory(tid_is_not_equal, event_writer_excluded_field, excluded_bool, T_BOOLEAN, MemNode::unordered);
3917
3918 // Store the tid to the event writer.
3919 store_to_memory(tid_is_not_equal, event_writer_tid_field, tid, T_LONG, MemNode::unordered);
3920
3921 // Update control and phi nodes.
3922 event_writer_tid_compare_rgn->init_req(_true_path, tid_is_not_equal);
3923 event_writer_tid_compare_rgn->init_req(_false_path, tid_is_equal);
3924 event_writer_tid_compare_mem->init_req(_true_path, reset_memory());
3925 event_writer_tid_compare_mem->init_req(_false_path, _gvn.transform(vthread_compare_mem));
3926 event_writer_tid_compare_io->init_req(_true_path, i_o());
3927 event_writer_tid_compare_io->init_req(_false_path, _gvn.transform(vthread_compare_io));
3928
3929 // Result of top level CFG, Memory, IO and Value.
3930 RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
3931 PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
3932 PhiNode* result_io = new PhiNode(result_rgn, Type::ABIO);
3933 PhiNode* result_value = new PhiNode(result_rgn, TypeInstPtr::BOTTOM);
3934
3935 // Result control.
3936 result_rgn->init_req(_true_path, _gvn.transform(event_writer_tid_compare_rgn));
3937 result_rgn->init_req(_false_path, jobj_is_null);
3938
3939 // Result memory.
3940 result_mem->init_req(_true_path, _gvn.transform(event_writer_tid_compare_mem));
3941 result_mem->init_req(_false_path, input_memory_state);
3942
3943 // Result IO.
3944 result_io->init_req(_true_path, _gvn.transform(event_writer_tid_compare_io));
3945 result_io->init_req(_false_path, input_io_state);
3946
3947 // Result value.
3948 result_value->init_req(_true_path, event_writer); // return event writer oop
3949 result_value->init_req(_false_path, null()); // return null
3950
3951 // Set output state.
3952 set_control(_gvn.transform(result_rgn));
3953 set_all_memory(_gvn.transform(result_mem));
3954 set_i_o(_gvn.transform(result_io));
3955 set_result(result_rgn, result_value);
3956 return true;
3957 }
3958
3959 /*
3960 * The intrinsic is a model of this pseudo-code:
3961 *
3962 * JfrThreadLocal* const tl = thread->jfr_thread_local();
3963 * if (carrierThread != thread) { // is virtual thread
3964 * const u2 vthread_epoch_raw = java_lang_Thread::jfr_epoch(thread);
3965 * bool excluded = vthread_epoch_raw & excluded_mask;
3966 * AtomicAccess::store(&tl->_contextual_tid, java_lang_Thread::tid(thread));
3967 * AtomicAccess::store(&tl->_contextual_thread_excluded, is_excluded);
3968 * if (!excluded) {
3969 * const u2 vthread_epoch = vthread_epoch_raw & epoch_mask;
3970 * AtomicAccess::store(&tl->_vthread_epoch, vthread_epoch);
3971 * }
3972 * AtomicAccess::release_store(&tl->_vthread, true);
3973 * return;
3974 * }
3975 * AtomicAccess::release_store(&tl->_vthread, false);
3976 */
3977 void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) {
3978 enum { _true_path = 1, _false_path = 2, PATH_LIMIT };
3979
3980 Node* input_memory_state = reset_memory();
3981 set_all_memory(input_memory_state);
3982
  // The most significant bit of the u2 is used to denote thread exclusion.
  Node* excluded_mask = _gvn.intcon(1 << 15);
  // The epoch generation is in the range [1, 32767].
  Node* epoch_mask = _gvn.intcon(32767);
3987
3988 Node* const carrierThread = generate_current_thread(jt);
3989 // If thread != carrierThread, this is a virtual thread.
3990 Node* thread_cmp_carrierThread = _gvn.transform(new CmpPNode(thread, carrierThread));
3991 Node* test_thread_not_equal_carrierThread = _gvn.transform(new BoolNode(thread_cmp_carrierThread, BoolTest::ne));
3992 IfNode* iff_thread_not_equal_carrierThread =
3993 create_and_map_if(control(), test_thread_not_equal_carrierThread, PROB_FAIR, COUNT_UNKNOWN);
3994
3995 Node* vthread_offset = off_heap_plus_addr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_OFFSET_JFR));
3996
3997 // False branch, is carrierThread.
3998 Node* thread_equal_carrierThread = _gvn.transform(new IfFalseNode(iff_thread_not_equal_carrierThread));
3999 // Store release
4000 Node* vthread_false_memory = store_to_memory(thread_equal_carrierThread, vthread_offset, _gvn.intcon(0), T_BOOLEAN, MemNode::release, true);
4001
4002 set_all_memory(input_memory_state);
4003
4004 // True branch, is virtual thread.
4005 Node* thread_not_equal_carrierThread = _gvn.transform(new IfTrueNode(iff_thread_not_equal_carrierThread));
4006 set_control(thread_not_equal_carrierThread);
4007
4008 // Load the raw epoch value from the vthread.
4009 Node* epoch_offset = basic_plus_adr(thread, java_lang_Thread::jfr_epoch_offset());
4010 Node* epoch_raw = access_load_at(thread, epoch_offset, _gvn.type(epoch_offset)->is_ptr(), TypeInt::CHAR, T_CHAR,
4011 IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD);
4012
4013 // Mask off the excluded information from the epoch.
  Node* const is_excluded = _gvn.transform(new AndINode(epoch_raw, excluded_mask));
4015
4016 // Load the tid field from the thread.
4017 Node* tid = load_field_from_object(thread, "tid", "J");
4018
4019 // Store the vthread tid to the jfr thread local.
4020 Node* thread_id_offset = off_heap_plus_addr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_ID_OFFSET_JFR));
4021 Node* tid_memory = store_to_memory(control(), thread_id_offset, tid, T_LONG, MemNode::unordered, true);
4022
  // Branch on is_excluded to conditionalize updating the epoch.
4024 Node* excluded_cmp = _gvn.transform(new CmpINode(is_excluded, excluded_mask));
4025 Node* test_excluded = _gvn.transform(new BoolNode(excluded_cmp, BoolTest::eq));
4026 IfNode* iff_excluded = create_and_map_if(control(), test_excluded, PROB_MIN, COUNT_UNKNOWN);
4027
4028 // True branch, vthread is excluded, no need to write epoch info.
4029 Node* excluded = _gvn.transform(new IfTrueNode(iff_excluded));
4030 set_control(excluded);
4031 Node* vthread_is_excluded = _gvn.intcon(1);
4032
4033 // False branch, vthread is included, update epoch info.
4034 Node* included = _gvn.transform(new IfFalseNode(iff_excluded));
4035 set_control(included);
4036 Node* vthread_is_included = _gvn.intcon(0);
4037
4038 // Get epoch value.
4039 Node* epoch = _gvn.transform(new AndINode(epoch_raw, epoch_mask));
4040
4041 // Store the vthread epoch to the jfr thread local.
4042 Node* vthread_epoch_offset = off_heap_plus_addr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EPOCH_OFFSET_JFR));
4043 Node* included_memory = store_to_memory(control(), vthread_epoch_offset, epoch, T_CHAR, MemNode::unordered, true);
4044
4045 RegionNode* excluded_rgn = new RegionNode(PATH_LIMIT);
4046 record_for_igvn(excluded_rgn);
4047 PhiNode* excluded_mem = new PhiNode(excluded_rgn, Type::MEMORY, TypePtr::BOTTOM);
4048 record_for_igvn(excluded_mem);
4049 PhiNode* exclusion = new PhiNode(excluded_rgn, TypeInt::BOOL);
4050 record_for_igvn(exclusion);
4051
4052 // Merge the excluded control and memory.
4053 excluded_rgn->init_req(_true_path, excluded);
4054 excluded_rgn->init_req(_false_path, included);
4055 excluded_mem->init_req(_true_path, tid_memory);
4056 excluded_mem->init_req(_false_path, included_memory);
4057 exclusion->init_req(_true_path, vthread_is_excluded);
4058 exclusion->init_req(_false_path, vthread_is_included);
4059
4060 // Set intermediate state.
4061 set_control(_gvn.transform(excluded_rgn));
4062 set_all_memory(excluded_mem);
4063
4064 // Store the vthread exclusion state to the jfr thread local.
4065 Node* thread_local_excluded_offset = off_heap_plus_addr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EXCLUDED_OFFSET_JFR));
4066 store_to_memory(control(), thread_local_excluded_offset, _gvn.transform(exclusion), T_BOOLEAN, MemNode::unordered, true);
4067
4068 // Store release
  Node* vthread_true_memory = store_to_memory(control(), vthread_offset, _gvn.intcon(1), T_BOOLEAN, MemNode::release, true);
4070
4071 RegionNode* thread_compare_rgn = new RegionNode(PATH_LIMIT);
4072 record_for_igvn(thread_compare_rgn);
4073 PhiNode* thread_compare_mem = new PhiNode(thread_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
4074 record_for_igvn(thread_compare_mem);
4075 PhiNode* vthread = new PhiNode(thread_compare_rgn, TypeInt::BOOL);
4076 record_for_igvn(vthread);
4077
4078 // Merge the thread_compare control and memory.
4079 thread_compare_rgn->init_req(_true_path, control());
4080 thread_compare_rgn->init_req(_false_path, thread_equal_carrierThread);
4081 thread_compare_mem->init_req(_true_path, vthread_true_memory);
4082 thread_compare_mem->init_req(_false_path, vthread_false_memory);
4083
4084 // Set output state.
4085 set_control(_gvn.transform(thread_compare_rgn));
4086 set_all_memory(_gvn.transform(thread_compare_mem));
4087 }
4088
4089 #endif // JFR_HAVE_INTRINSICS
4090
4091 //------------------------inline_native_currentCarrierThread------------------
4092 bool LibraryCallKit::inline_native_currentCarrierThread() {
4093 Node* junk = nullptr;
4094 set_result(generate_current_thread(junk));
4095 return true;
4096 }
4097
4098 //------------------------inline_native_currentThread------------------
4099 bool LibraryCallKit::inline_native_currentThread() {
4100 Node* junk = nullptr;
4101 set_result(generate_virtual_thread(junk));
4102 return true;
4103 }
4104
//------------------------inline_native_setCurrentThread------------------
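// A rough pseudo-code model of the generated IR (accessor names are
// descriptive only, not actual VM symbols):
//
//   JavaThread* jt = <current JavaThread>;
//   <store newThread through jt's vthread OopHandle>;
//   jt->set_monitor_owner_id(java_lang_Thread::tid(newThread));
//   JFR_ONLY(extend_setCurrentThread(jt, newThread);)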
4106 bool LibraryCallKit::inline_native_setCurrentThread() {
4107 assert(C->method()->changes_current_thread(),
4108 "method changes current Thread but is not annotated ChangesCurrentThread");
4109 Node* arr = argument(1);
4110 Node* thread = _gvn.transform(new ThreadLocalNode());
4111 Node* p = off_heap_plus_addr(thread, in_bytes(JavaThread::vthread_offset()));
4112 Node* thread_obj_handle
4113 = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
4114 const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
4115 access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
4116
4117 // Change the _monitor_owner_id of the JavaThread
4118 Node* tid = load_field_from_object(arr, "tid", "J");
4119 Node* monitor_owner_id_offset = off_heap_plus_addr(thread, in_bytes(JavaThread::monitor_owner_id_offset()));
4120 store_to_memory(control(), monitor_owner_id_offset, tid, T_LONG, MemNode::unordered, true);
4121
4122 JFR_ONLY(extend_setCurrentThread(thread, arr);)
4123 return true;
4124 }
4125
4126 const Type* LibraryCallKit::scopedValueCache_type() {
4127 ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
4128 const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
4129 const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS, /* stable= */ false, /* flat= */ false, /* not_flat= */ true, /* not_null_free= */ true, true);
4130
  // Because we create the scopedValue cache lazily, we have to make the
  // type of the result BotPTR.
4133 bool xk = etype->klass_is_exact();
4134 const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, TypeAryPtr::Offset(0));
4135 return objects_type;
4136 }
4137
4138 Node* LibraryCallKit::scopedValueCache_helper() {
4139 Node* thread = _gvn.transform(new ThreadLocalNode());
4140 Node* p = off_heap_plus_addr(thread, in_bytes(JavaThread::scopedValueCache_offset()));
4141 // We cannot use immutable_memory() because we might flip onto a
4142 // different carrier thread, at which point we'll need to use that
4143 // carrier thread's cache.
4144 // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
4145 // TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
4146 return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
4147 }
4148
4149 //------------------------inline_native_scopedValueCache------------------
4150 bool LibraryCallKit::inline_native_scopedValueCache() {
4151 Node* cache_obj_handle = scopedValueCache_helper();
4152 const Type* objects_type = scopedValueCache_type();
4153 set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
4154
4155 return true;
4156 }
4157
4158 //------------------------inline_native_setScopedValueCache------------------
4159 bool LibraryCallKit::inline_native_setScopedValueCache() {
4160 Node* arr = argument(0);
4161 Node* cache_obj_handle = scopedValueCache_helper();
4162 const Type* objects_type = scopedValueCache_type();
4163
4164 const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
4165 access_store_at(nullptr, cache_obj_handle, adr_type, arr, objects_type, T_OBJECT, IN_NATIVE | MO_UNORDERED);
4166
4167 return true;
4168 }
4169
4170 //------------------------inline_native_Continuation_pin and unpin-----------
4171
4172 // Shared implementation routine for both pin and unpin.
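// A pseudo-code model of the generated IR (field accessors are illustrative):
//
//   ContinuationEntry* last = thread->last_continuation();
//   if (last != nullptr) {
//     uint32_t pin_count = last->pin_count();
//     if (pin_count == (unpin ? 0 : UINT32_MAX)) {
//       deopt();  // the interpreter throws IllegalStateException on over/underflow
//     }
//     last->set_pin_count(unpin ? pin_count - 1 : pin_count + 1);
//   }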
4173 bool LibraryCallKit::inline_native_Continuation_pinning(bool unpin) {
4174 enum { _true_path = 1, _false_path = 2, PATH_LIMIT };
4175
4176 // Save input memory.
4177 Node* input_memory_state = reset_memory();
4178 set_all_memory(input_memory_state);
4179
4180 // TLS
4181 Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
4182 Node* last_continuation_offset = off_heap_plus_addr(tls_ptr, in_bytes(JavaThread::cont_entry_offset()));
4183 Node* last_continuation = make_load(control(), last_continuation_offset, last_continuation_offset->get_ptr_type(), T_ADDRESS, MemNode::unordered);
4184
4185 // Null check the last continuation object.
4186 Node* continuation_cmp_null = _gvn.transform(new CmpPNode(last_continuation, null()));
4187 Node* test_continuation_not_equal_null = _gvn.transform(new BoolNode(continuation_cmp_null, BoolTest::ne));
4188 IfNode* iff_continuation_not_equal_null = create_and_map_if(control(), test_continuation_not_equal_null, PROB_MAX, COUNT_UNKNOWN);
4189
4190 // False path, last continuation is null.
4191 Node* continuation_is_null = _gvn.transform(new IfFalseNode(iff_continuation_not_equal_null));
4192
4193 // True path, last continuation is not null.
4194 Node* continuation_is_not_null = _gvn.transform(new IfTrueNode(iff_continuation_not_equal_null));
4195
4196 set_control(continuation_is_not_null);
4197
4198 // Load the pin count from the last continuation.
4199 Node* pin_count_offset = off_heap_plus_addr(last_continuation, in_bytes(ContinuationEntry::pin_count_offset()));
4200 Node* pin_count = make_load(control(), pin_count_offset, TypeInt::INT, T_INT, MemNode::unordered);
4201
  // The loaded pin count is compared against a context-specific rhs for over/underflow detection.
4203 Node* pin_count_rhs;
4204 if (unpin) {
4205 pin_count_rhs = _gvn.intcon(0);
4206 } else {
4207 pin_count_rhs = _gvn.intcon(UINT32_MAX);
4208 }
4209 Node* pin_count_cmp = _gvn.transform(new CmpUNode(pin_count, pin_count_rhs));
4210 Node* test_pin_count_over_underflow = _gvn.transform(new BoolNode(pin_count_cmp, BoolTest::eq));
4211 IfNode* iff_pin_count_over_underflow = create_and_map_if(control(), test_pin_count_over_underflow, PROB_MIN, COUNT_UNKNOWN);
4212
4213 // True branch, pin count over/underflow.
4214 Node* pin_count_over_underflow = _gvn.transform(new IfTrueNode(iff_pin_count_over_underflow));
4215 {
    // Trap (but do not deoptimize (Action_none)) and continue in the interpreter,
    // which will throw an IllegalStateException for pin count over/underflow.
    // No memory has changed so far, so we can use the memory state created by
    // reset_memory() at the beginning of this intrinsic; no need to call reset_memory() again.
4220 PreserveJVMState pjvms(this);
4221 set_control(pin_count_over_underflow);
4222 uncommon_trap(Deoptimization::Reason_intrinsic,
4223 Deoptimization::Action_none);
4224 assert(stopped(), "invariant");
4225 }
4226
4227 // False branch, no pin count over/underflow. Increment or decrement pin count and store back.
4228 Node* valid_pin_count = _gvn.transform(new IfFalseNode(iff_pin_count_over_underflow));
4229 set_control(valid_pin_count);
4230
4231 Node* next_pin_count;
4232 if (unpin) {
4233 next_pin_count = _gvn.transform(new SubINode(pin_count, _gvn.intcon(1)));
4234 } else {
4235 next_pin_count = _gvn.transform(new AddINode(pin_count, _gvn.intcon(1)));
4236 }
4237
4238 store_to_memory(control(), pin_count_offset, next_pin_count, T_INT, MemNode::unordered);
4239
4240 // Result of top level CFG and Memory.
4241 RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
4242 record_for_igvn(result_rgn);
4243 PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
4244 record_for_igvn(result_mem);
4245
4246 result_rgn->init_req(_true_path, valid_pin_count);
4247 result_rgn->init_req(_false_path, continuation_is_null);
4248 result_mem->init_req(_true_path, reset_memory());
4249 result_mem->init_req(_false_path, input_memory_state);
4250
4251 // Set output state.
4252 set_control(_gvn.transform(result_rgn));
4253 set_all_memory(_gvn.transform(result_mem));
4254
4255 return true;
4256 }
4257
4258 //---------------------------load_mirror_from_klass----------------------------
4259 // Given a klass oop, load its java mirror (a java.lang.Class oop).
4260 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
4261 Node* p = off_heap_plus_addr(klass, in_bytes(Klass::java_mirror_offset()));
4262 Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
4263 // mirror = ((OopHandle)mirror)->resolve();
4264 return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
4265 }
4266
4267 //-----------------------load_klass_from_mirror_common-------------------------
4268 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
4269 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
4270 // and branch to the given path on the region.
4271 // If never_see_null, take an uncommon trap on null, so we can optimistically
4272 // compile for the non-null case.
4273 // If the region is null, force never_see_null = true.
4274 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
4275 bool never_see_null,
4276 RegionNode* region,
4277 int null_path,
4278 int offset) {
4279 if (region == nullptr) never_see_null = true;
4280 Node* p = basic_plus_adr(mirror, offset);
4281 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4282 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
4283 Node* null_ctl = top();
4284 kls = null_check_oop(kls, &null_ctl, never_see_null);
4285 if (region != nullptr) {
    // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
4287 region->init_req(null_path, null_ctl);
4288 } else {
4289 assert(null_ctl == top(), "no loose ends");
4290 }
4291 return kls;
4292 }
4293
4294 //--------------------(inline_native_Class_query helpers)---------------------
4295 // Use this for JVM_ACC_INTERFACE.
4296 // Fall through if (mods & mask) == bits, take the guard otherwise.
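// For example, generate_interface_guard below passes mask = JVM_ACC_INTERFACE
// and bits = 0: the fall-through path sees a non-interface klass, and the
// returned guard path is taken for interfaces.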
4297 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
4298 ByteSize offset, const Type* type, BasicType bt) {
4299 // Branch around if the given klass has the given modifier bit set.
4300 // Like generate_guard, adds a new path onto the region.
4301 Node* modp = off_heap_plus_addr(kls, in_bytes(offset));
4302 Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
4303 Node* mask = intcon(modifier_mask);
4304 Node* bits = intcon(modifier_bits);
4305 Node* mbit = _gvn.transform(new AndINode(mods, mask));
4306 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
4307 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
4308 return generate_fair_guard(bol, region);
4309 }
4310
4311 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
4312 return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
4313 InstanceKlass::access_flags_offset(), TypeInt::CHAR, T_CHAR);
4314 }
4315
4316 // Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
4317 Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
4318 return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
4319 Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
4320 }
4321
4322 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
4323 return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
4324 }
4325
4326 //-------------------------inline_native_Class_query-------------------
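// A pseudo-code summary of the queries handled here (primitive mirrors take
// the prim_return_value path; names are descriptive only):
//
//   _isInstance:    return obj != null && obj is an instance of the mirrored class;
//   _isHidden:      return klass->is_hidden();
//   _getSuperclass: arrays report Object.class, interfaces report null,
//                   all other klasses report the mirror of their _super.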
4327 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
4328 const Type* return_type = TypeInt::BOOL;
4329 Node* prim_return_value = top(); // what happens if it's a primitive class?
4330 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4331 bool expect_prim = false; // most of these guys expect to work on refs
4332
4333 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
4334
4335 Node* mirror = argument(0);
4336 Node* obj = top();
4337
4338 switch (id) {
4339 case vmIntrinsics::_isInstance:
4340 // nothing is an instance of a primitive type
4341 prim_return_value = intcon(0);
4342 obj = argument(1);
4343 break;
4344 case vmIntrinsics::_isHidden:
4345 prim_return_value = intcon(0);
4346 break;
4347 case vmIntrinsics::_getSuperclass:
4348 prim_return_value = null();
4349 return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
4350 break;
4351 default:
4352 fatal_unexpected_iid(id);
4353 break;
4354 }
4355
4356 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4357 if (mirror_con == nullptr) return false; // cannot happen?
4358
4359 #ifndef PRODUCT
4360 if (C->print_intrinsics() || C->print_inlining()) {
4361 ciType* k = mirror_con->java_mirror_type();
4362 if (k) {
4363 tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
4364 k->print_name();
4365 tty->cr();
4366 }
4367 }
4368 #endif
4369
4370 // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive).
4371 RegionNode* region = new RegionNode(PATH_LIMIT);
4372 record_for_igvn(region);
4373 PhiNode* phi = new PhiNode(region, return_type);
4374
  // The mirror will never be null for Reflection.getClassAccessFlags; however,
  // it may be null for Class.isInstance or Class.getModifiers. Throw an NPE
  // if it is. See bug 4774291.
4378
4379 // For Reflection.getClassAccessFlags(), the null check occurs in
4380 // the wrong place; see inline_unsafe_access(), above, for a similar
4381 // situation.
4382 mirror = null_check(mirror);
4383 // If mirror or obj is dead, only null-path is taken.
4384 if (stopped()) return true;
4385
4386 if (expect_prim) never_see_null = false; // expect nulls (meaning prims)
4387
4388 // Now load the mirror's klass metaobject, and null-check it.
4389 // Side-effects region with the control path if the klass is null.
4390 Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path);
4391 // If kls is null, we have a primitive mirror.
4392 phi->init_req(_prim_path, prim_return_value);
4393 if (stopped()) { set_result(region, phi); return true; }
4394 bool safe_for_replace = (region->in(_prim_path) == top());
4395
4396 Node* p; // handy temp
4397 Node* null_ctl;
4398
4399 // Now that we have the non-null klass, we can perform the real query.
4400 // For constant classes, the query will constant-fold in LoadNode::Value.
4401 Node* query_value = top();
4402 switch (id) {
4403 case vmIntrinsics::_isInstance:
4404 // nothing is an instance of a primitive type
4405 query_value = gen_instanceof(obj, kls, safe_for_replace);
4406 break;
4407
4408 case vmIntrinsics::_isHidden:
4409 // (To verify this code sequence, check the asserts in JVM_IsHiddenClass.)
4410 if (generate_hidden_class_guard(kls, region) != nullptr)
      // A guard was added. If the guard is taken, it was a hidden class.
4412 phi->add_req(intcon(1));
4413 // If we fall through, it's a plain class.
4414 query_value = intcon(0);
4415 break;
4416
  case vmIntrinsics::_getSuperclass:
4419 // The rules here are somewhat unfortunate, but we can still do better
4420 // with random logic than with a JNI call.
4421 // Interfaces store null or Object as _super, but must report null.
4422 // Arrays store an intermediate super as _super, but must report Object.
4423 // Other types can report the actual _super.
4424 // (To verify this code sequence, check the asserts in JVM_IsInterface.)
4425 if (generate_array_guard(kls, region) != nullptr) {
4426 // A guard was added. If the guard is taken, it was an array.
4427 phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
4428 }
4429 // Check for interface after array since this checks AccessFlags offset into InstanceKlass.
4430 // In other words, we are accessing subtype-specific information, so we need to determine the subtype first.
4431 if (generate_interface_guard(kls, region) != nullptr) {
4432 // A guard was added. If the guard is taken, it was an interface.
4433 phi->add_req(null());
4434 }
4435 // If we fall through, it's a plain class. Get its _super.
4436 if (!stopped()) {
4437 p = basic_plus_adr(top(), kls, in_bytes(Klass::super_offset()));
4438 kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
4439 null_ctl = top();
4440 kls = null_check_oop(kls, &null_ctl);
4441 if (null_ctl != top()) {
4442 // If the guard is taken, Object.superClass is null (both klass and mirror).
4443 region->add_req(null_ctl);
4444 phi ->add_req(null());
4445 }
4446 if (!stopped()) {
4447 query_value = load_mirror_from_klass(kls);
4448 }
4449 }
4450 break;
4451
4452 default:
4453 fatal_unexpected_iid(id);
4454 break;
4455 }
4456
4457 // Fall-through is the normal case of a query to a real class.
4458 phi->init_req(1, query_value);
4459 region->init_req(1, control());
4460
4461 C->set_has_split_ifs(true); // Has chance for split-if optimization
4462 set_result(region, phi);
4463 return true;
4464 }
4465
4466
4467 //-------------------------inline_Class_cast-------------------
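// A pseudo-code model of the dynamic path (taken when the cast cannot be
// folded statically; helper names are illustrative):
//
//   Klass* k = <klass of mirror>;
//   if (k == nullptr || <obj fails the checkcast against k>) {
//     deopt();  // the interpreter throws the ClassCastException
//   }
//   return obj;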
4468 bool LibraryCallKit::inline_Class_cast() {
4469 Node* mirror = argument(0); // Class
4470 Node* obj = argument(1);
4471 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4472 if (mirror_con == nullptr) {
4473 return false; // dead path (mirror->is_top()).
4474 }
4475 if (obj == nullptr || obj->is_top()) {
4476 return false; // dead path
4477 }
4478 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4479
4480 // First, see if Class.cast() can be folded statically.
4481 // java_mirror_type() returns non-null for compile-time Class constants.
4482 ciType* tm = mirror_con->java_mirror_type();
4483 if (tm != nullptr && tm->is_klass() &&
4484 tp != nullptr) {
4485 if (!tp->is_loaded()) {
4486 // Don't use intrinsic when class is not loaded.
4487 return false;
4488 } else {
4489 const TypeKlassPtr* tklass = TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces);
4490 int static_res = C->static_subtype_check(tklass, tp->as_klass_type());
4491 if (static_res == Compile::SSC_always_true) {
4492 // isInstance() is true - fold the code.
4493 set_result(obj);
4494 return true;
4495 } else if (static_res == Compile::SSC_always_false) {
4496 // Don't use intrinsic, have to throw ClassCastException.
4497 // If the reference is null, the non-intrinsic bytecode will
4498 // be optimized appropriately.
4499 return false;
4500 }
4501 }
4502 }
4503
4504 // Bailout intrinsic and do normal inlining if exception path is frequent.
4505 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4506 return false;
4507 }
4508
4509 // Generate dynamic checks.
4510 // Class.cast() is java implementation of _checkcast bytecode.
4511 // Do checkcast (Parse::do_checkcast()) optimizations here.
4512
4513 mirror = null_check(mirror);
4514 // If mirror is dead, only null-path is taken.
4515 if (stopped()) {
4516 return true;
4517 }
4518
4519 // Not-subtype or the mirror's klass ptr is nullptr (in case it is a primitive).
4520 enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
4521 RegionNode* region = new RegionNode(PATH_LIMIT);
4522 record_for_igvn(region);
4523
4524 // Now load the mirror's klass metaobject, and null-check it.
4525 // If kls is null, we have a primitive mirror and
4526 // nothing is an instance of a primitive type.
4527 Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4528
4529 Node* res = top();
4530 Node* io = i_o();
4531 Node* mem = merged_memory();
4532 SafePointNode* new_cast_failure_map = nullptr;
4533
4534 if (!stopped()) {
4535
4536 Node* bad_type_ctrl = top();
4537 // Do checkcast optimizations.
4538 res = gen_checkcast(obj, kls, &bad_type_ctrl, &new_cast_failure_map);
4539 region->init_req(_bad_type_path, bad_type_ctrl);
4540 }
4541 if (region->in(_prim_path) != top() ||
4542 region->in(_bad_type_path) != top() ||
4543 region->in(_npe_path) != top()) {
4544 // Let Interpreter throw ClassCastException.
4545 PreserveJVMState pjvms(this);
4546 if (new_cast_failure_map != nullptr) {
4547 // The current map on the success path could have been modified. Use the dedicated failure path map.
4548 set_map(new_cast_failure_map);
4549 }
4550 set_control(_gvn.transform(region));
4551 // Set IO and memory because gen_checkcast may override them when buffering inline types
4552 set_i_o(io);
4553 set_all_memory(mem);
4554 uncommon_trap(Deoptimization::Reason_intrinsic,
4555 Deoptimization::Action_maybe_recompile);
4556 }
4557 if (!stopped()) {
4558 set_result(res);
4559 }
4560 return true;
4561 }
4562
4563
4564 //--------------------------inline_native_subtype_check------------------------
4565 // This intrinsic takes the JNI calls out of the heart of
4566 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
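// A pseudo-code model of the decision tree below (superc is the receiver):
//
//   if (superk == null || subk == null) {  // at least one primitive mirror
//     return superc == subc;               // identical primitives => true
//   }
//   return subk is a subtype of superk;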
4567 bool LibraryCallKit::inline_native_subtype_check() {
4568 // Pull both arguments off the stack.
4569 Node* args[2]; // two java.lang.Class mirrors: superc, subc
4570 args[0] = argument(0);
4571 args[1] = argument(1);
4572 Node* klasses[2]; // corresponding Klasses: superk, subk
4573 klasses[0] = klasses[1] = top();
4574
4575 enum {
4576 // A full decision tree on {superc is prim, subc is prim}:
4577 _prim_0_path = 1, // {P,N} => false
4578 // {P,P} & superc!=subc => false
4579 _prim_same_path, // {P,P} & superc==subc => true
4580 _prim_1_path, // {N,P} => false
4581 _ref_subtype_path, // {N,N} & subtype check wins => true
4582 _both_ref_path, // {N,N} & subtype check loses => false
4583 PATH_LIMIT
4584 };
4585
4586 RegionNode* region = new RegionNode(PATH_LIMIT);
4587 RegionNode* prim_region = new RegionNode(2);
4588 Node* phi = new PhiNode(region, TypeInt::BOOL);
4589 record_for_igvn(region);
4590 record_for_igvn(prim_region);
4591
4592 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
4593 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4594 int class_klass_offset = java_lang_Class::klass_offset();
4595
4596 // First null-check both mirrors and load each mirror's klass metaobject.
4597 int which_arg;
4598 for (which_arg = 0; which_arg <= 1; which_arg++) {
4599 Node* arg = args[which_arg];
4600 arg = null_check(arg);
4601 if (stopped()) break;
4602 args[which_arg] = arg;
4603
4604 Node* p = basic_plus_adr(arg, class_klass_offset);
4605 Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
4606 klasses[which_arg] = _gvn.transform(kls);
4607 }
4608
4609 // Having loaded both klasses, test each for null.
4610 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4611 for (which_arg = 0; which_arg <= 1; which_arg++) {
4612 Node* kls = klasses[which_arg];
4613 Node* null_ctl = top();
4614 kls = null_check_oop(kls, &null_ctl, never_see_null);
4615 if (which_arg == 0) {
4616 prim_region->init_req(1, null_ctl);
4617 } else {
4618 region->init_req(_prim_1_path, null_ctl);
4619 }
4620 if (stopped()) break;
4621 klasses[which_arg] = kls;
4622 }
4623
4624 if (!stopped()) {
4625 // now we have two reference types, in klasses[0..1]
4626 Node* subk = klasses[1]; // the argument to isAssignableFrom
4627 Node* superk = klasses[0]; // the receiver
4628 region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4629 region->set_req(_ref_subtype_path, control());
4630 }
4631
4632 // If both operands are primitive (both klasses null), then
4633 // we must return true when they are identical primitives.
4634 // It is convenient to test this after the first null klass check.
4635 // This path is also used if superc is a value mirror.
4636 set_control(_gvn.transform(prim_region));
4637 if (!stopped()) {
4638 // Since superc is primitive, make a guard for the superc==subc case.
4639 Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4640 Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4641 generate_fair_guard(bol_eq, region);
4642 if (region->req() == PATH_LIMIT+1) {
4643 // A guard was added. If the added guard is taken, superc==subc.
4644 region->swap_edges(PATH_LIMIT, _prim_same_path);
4645 region->del_req(PATH_LIMIT);
4646 }
4647 region->set_req(_prim_0_path, control()); // Not equal after all.
4648 }
4649
4650 // these are the only paths that produce 'true':
4651 phi->set_req(_prim_same_path, intcon(1));
4652 phi->set_req(_ref_subtype_path, intcon(1));
4653
4654 // pull together the cases:
4655 assert(region->req() == PATH_LIMIT, "sane region");
4656 for (uint i = 1; i < region->req(); i++) {
4657 Node* ctl = region->in(i);
4658 if (ctl == nullptr || ctl == top()) {
4659 region->set_req(i, top());
4660 phi ->set_req(i, top());
4661 } else if (phi->in(i) == nullptr) {
4662 phi->set_req(i, intcon(0)); // all other paths produce 'false'
4663 }
4664 }
4665
4666 set_control(_gvn.transform(region));
4667 set_result(_gvn.transform(phi));
4668 return true;
4669 }
4670
4671 //---------------------generate_array_guard_common------------------------
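// The layout helper of an array klass is negative, with an array tag in its
// high bits (see Klass::_lh_array_tag_shift), while instance klasses have a
// non-negative layout helper. The guards below therefore test either the
// shifted tag (ref/type array checks) or the sign (any/non-array checks).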
4672 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind, Node** obj) {
4673
4674 if (stopped()) {
4675 return nullptr;
4676 }
4677
4678 // Like generate_guard, adds a new path onto the region.
4679 jint layout_con = 0;
4680 Node* layout_val = get_layout_helper(kls, layout_con);
4681 if (layout_val == nullptr) {
    bool query = false;
4683 switch(kind) {
4684 case RefArray: query = Klass::layout_helper_is_refArray(layout_con); break;
4685 case NonRefArray: query = !Klass::layout_helper_is_refArray(layout_con); break;
4686 case TypeArray: query = Klass::layout_helper_is_typeArray(layout_con); break;
4687 case AnyArray: query = Klass::layout_helper_is_array(layout_con); break;
4688 case NonArray: query = !Klass::layout_helper_is_array(layout_con); break;
4689 default:
4690 ShouldNotReachHere();
4691 }
4692 if (!query) {
4693 return nullptr; // never a branch
4694 } else { // always a branch
4695 Node* always_branch = control();
4696 if (region != nullptr)
4697 region->add_req(always_branch);
4698 set_control(top());
4699 return always_branch;
4700 }
4701 }
4702 unsigned int value = 0;
4703 BoolTest::mask btest = BoolTest::illegal;
4704 switch(kind) {
4705 case RefArray:
4706 case NonRefArray: {
4707 value = Klass::_lh_array_tag_ref_value;
4708 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4709 btest = (kind == RefArray) ? BoolTest::eq : BoolTest::ne;
4710 break;
4711 }
4712 case TypeArray: {
4713 value = Klass::_lh_array_tag_type_value;
4714 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4715 btest = BoolTest::eq;
4716 break;
4717 }
4718 case AnyArray: value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
4719 case NonArray: value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
4720 default:
4721 ShouldNotReachHere();
4722 }
4723 // Now test the correct condition.
4724 jint nval = (jint)value;
4725 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4726 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4727 Node* ctrl = generate_fair_guard(bol, region);
4728 Node* is_array_ctrl = kind == NonArray ? control() : ctrl;
4729 if (obj != nullptr && is_array_ctrl != nullptr && is_array_ctrl != top()) {
4730 // Keep track of the fact that 'obj' is an array to prevent
4731 // array specific accesses from floating above the guard.
4732 *obj = _gvn.transform(new CheckCastPPNode(is_array_ctrl, *obj, TypeAryPtr::BOTTOM));
4733 }
4734 return ctrl;
4735 }
4736
4737 // public static native Object[] ValueClass::newNullRestrictedAtomicArray(Class<?> componentType, int length, Object initVal);
4738 // public static native Object[] ValueClass::newNullRestrictedNonAtomicArray(Class<?> componentType, int length, Object initVal);
4739 // public static native Object[] ValueClass::newNullableAtomicArray(Class<?> componentType, int length);
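// A pseudo-code sketch of the fast path (taken only when the component type
// is a constant, loaded inline type; otherwise we bail out to the native call):
//
//   ArrayKlass* ak = <array klass for (componentType, null_free, atomic)>;
//   Object[] a = new_array(ak, length);  // zeroed allocation
//   if (null_free && initVal is not the all-zero value) {
//     <fill a with initVal>;             // folded into new_array via init_val
//   }
//   return a;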
4740 bool LibraryCallKit::inline_newArray(bool null_free, bool atomic) {
4741 assert(null_free || atomic, "nullable implies atomic");
4742 Node* componentType = argument(0);
4743 Node* length = argument(1);
4744 Node* init_val = null_free ? argument(2) : nullptr;
4745
4746 const TypeInstPtr* tp = _gvn.type(componentType)->isa_instptr();
4747 if (tp != nullptr) {
4748 ciInstanceKlass* ik = tp->instance_klass();
4749 if (ik == C->env()->Class_klass()) {
4750 ciType* t = tp->java_mirror_type();
4751 if (t != nullptr && t->is_inlinetype()) {
4752
4753 ciArrayKlass* array_klass = ciArrayKlass::make(t, null_free, atomic, true);
4754 assert(array_klass->is_elem_null_free() == null_free, "inconsistency");
4755
        // TODO 8350865 ZGC needs card marks on initializing oop stores
4757 if (UseZGC && null_free && !array_klass->is_flat_array_klass()) {
4758 return false;
4759 }
4760
4761 if (array_klass->is_loaded() && array_klass->element_klass()->as_inline_klass()->is_initialized()) {
4762 const TypeAryKlassPtr* array_klass_type = TypeAryKlassPtr::make(array_klass, Type::trust_interfaces);
4763 if (null_free) {
4764 if (init_val->is_InlineType()) {
4765 if (array_klass_type->is_flat() && init_val->as_InlineType()->is_all_zero(&gvn(), /* flat */ true)) {
4766 // Zeroing is enough because the init value is the all-zero value
4767 init_val = nullptr;
4768 } else {
4769 init_val = init_val->as_InlineType()->buffer(this);
4770 }
4771 }
4772 if (init_val != nullptr) {
4773 #ifdef ASSERT
4774 init_val = null_check(init_val);
4775 Node* wrong_type_ctl = gen_subtype_check(init_val, makecon(TypeKlassPtr::make(array_klass->element_klass())));
4776 {
4777 PreserveJVMState pjvms(this);
4778 set_control(wrong_type_ctl);
4779 halt(control(), frameptr(), "incompatible type for initVal in newArray");
4780 stop_and_kill_map();
4781 }
4782 #endif
4783 init_val = _gvn.transform(new CheckCastPPNode(control(), init_val, TypeOopPtr::make_from_klass(array_klass->element_klass()), ConstraintCastNode::DependencyType::NonFloatingNarrowing));
4784 }
4785 }
4786 Node* obj = new_array(makecon(array_klass_type), length, 0, nullptr, false, init_val);
4787 const TypeAryPtr* arytype = gvn().type(obj)->is_aryptr();
4788 assert(arytype->is_null_free() == null_free, "inconsistency");
4789 assert(arytype->is_not_null_free() == !null_free, "inconsistency");
4790 set_result(obj);
4791 return true;
4792 }
4793 }
4794 }
4795 }
4796 return false;
4797 }
4798
4799 // public static native boolean ValueClass::isFlatArray(Object array);
4800 // public static native boolean ValueClass::isNullRestrictedArray(Object array);
4801 // public static native boolean ValueClass::isAtomicArray(Object array);
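// Each property check maps to a test on the array (or its klass); the boolean
// result is materialized branch-free with a CMove: res = test ? 1 : 0.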
4802 bool LibraryCallKit::inline_getArrayProperties(ArrayPropertiesCheck check) {
4803 Node* array = argument(0);
4804
4805 Node* bol;
4806 switch(check) {
4807 case IsFlat:
4808 // TODO 8350865 Use the object version here instead of loading the klass
4809 // The problem is that PhaseMacroExpand::expand_flatarraycheck_node can only handle some IR shapes and will fail, for example, if the bol is directly wired to a ReturnNode
4810 bol = flat_array_test(load_object_klass(array));
4811 break;
4812 case IsNullRestricted:
4813 bol = null_free_array_test(array);
4814 break;
4815 case IsAtomic:
4816 // TODO 8350865 Implement this. It's a bit more complicated, see conditions in JVM_IsAtomicArray
4817 // Enable TestIntrinsics::test87/88 once this is implemented
4818 // bol = null_free_atomic_array_test
4819 return false;
4820 default:
4821 ShouldNotReachHere();
4822 }
4823
4824 Node* res = gvn().transform(new CMoveINode(bol, intcon(0), intcon(1), TypeInt::BOOL));
4825 set_result(res);
4826 return true;
4827 }
4828
4829 // Load the default refined array klass from an ObjArrayKlass. This relies on the first entry in the
4830 // '_next_refined_array_klass' linked list being the default (see ObjArrayKlass::klass_with_properties).
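// Pseudo-code (accessor names are descriptive only):
//
//   if (type_array_guard && klass is a type array klass) return klass;
//   Klass* refined = <head of klass->_next_refined_array_klass>;
//   if (refined == nullptr) deopt();  // list head not yet initialized
//   return refined;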
4831 Node* LibraryCallKit::load_default_refined_array_klass(Node* klass_node, bool type_array_guard) {
4832 RegionNode* region = new RegionNode(2);
4833 Node* phi = new PhiNode(region, TypeInstKlassPtr::OBJECT_OR_NULL);
4834
4835 if (type_array_guard) {
4836 generate_typeArray_guard(klass_node, region);
4837 if (region->req() == 3) {
4838 phi->add_req(klass_node);
4839 }
4840 }
4841 Node* adr_refined_klass = basic_plus_adr(top(), klass_node, in_bytes(ObjArrayKlass::next_refined_array_klass_offset()));
4842 Node* refined_klass = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), adr_refined_klass, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
4843
  // Can be null if not initialized yet; just deopt.
4845 Node* null_ctl = top();
4846 refined_klass = null_check_oop(refined_klass, &null_ctl, /* never_see_null= */ true);
4847
4848 region->init_req(1, control());
4849 phi->init_req(1, refined_klass);
4850
4851 set_control(_gvn.transform(region));
4852 return _gvn.transform(phi);
4853 }
4854
4855 // Load the non-refined array klass from an ObjArrayKlass.
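// For object arrays the non-refined klass is simply the _super of the given
// (possibly refined) klass; type array klasses are returned unchanged.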
4856 Node* LibraryCallKit::load_non_refined_array_klass(Node* klass_node) {
4857 const TypeAryKlassPtr* ary_klass_ptr = _gvn.type(klass_node)->isa_aryklassptr();
4858 if (ary_klass_ptr != nullptr && ary_klass_ptr->klass_is_exact()) {
4859 return _gvn.makecon(ary_klass_ptr->cast_to_refined_array_klass_ptr(false));
4860 }
4861
4862 RegionNode* region = new RegionNode(2);
4863 Node* phi = new PhiNode(region, TypeInstKlassPtr::OBJECT);
4864
4865 generate_typeArray_guard(klass_node, region);
4866 if (region->req() == 3) {
4867 phi->add_req(klass_node);
4868 }
4869 Node* super_adr = basic_plus_adr(top(), klass_node, in_bytes(Klass::super_offset()));
4870 Node* super_klass = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), super_adr, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT));
4871
4872 region->init_req(1, control());
4873 phi->init_req(1, super_klass);
4874
4875 set_control(_gvn.transform(region));
4876 return _gvn.transform(phi);
4877 }
4878
4879 //-----------------------inline_native_newArray--------------------------
4880 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4881 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
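// A pseudo-code model of the fast/slow split below:
//
//   Klass* ak = <array klass cached in the java.lang.Class mirror>;
//   if (ak == nullptr) {
//     return <slow call to newArray()>;  // throws, or caches the klass for next time
//   }
//   return new_array(ak, count);         // fast path allocation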
4882 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4883 Node* mirror;
4884 Node* count_val;
4885 if (uninitialized) {
4886 null_check_receiver();
4887 mirror = argument(1);
4888 count_val = argument(2);
4889 } else {
4890 mirror = argument(0);
4891 count_val = argument(1);
4892 }
4893
4894 mirror = null_check(mirror);
4895 // If mirror or obj is dead, only null-path is taken.
4896 if (stopped()) return true;
4897
4898 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4899 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4900 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4901 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4902 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4903
4904 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4905 Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
4906 result_reg, _slow_path);
4907 Node* normal_ctl = control();
4908 Node* no_array_ctl = result_reg->in(_slow_path);
4909
4910 // Generate code for the slow case. We make a call to newArray().
4911 set_control(no_array_ctl);
4912 if (!stopped()) {
4913 // Either the input type is void.class, or else the
4914 // array klass has not yet been cached. Either the
4915 // ensuing call will throw an exception, or else it
4916 // will cache the array klass for next time.
4917 PreserveJVMState pjvms(this);
4918 CallJavaNode* slow_call = nullptr;
4919 if (uninitialized) {
4920 // Generate optimized virtual call (holder class 'Unsafe' is final)
4921 slow_call = generate_method_call(vmIntrinsics::_allocateUninitializedArray, false, false, true);
4922 } else {
4923 slow_call = generate_method_call_static(vmIntrinsics::_newArray, true);
4924 }
4925 Node* slow_result = set_results_for_java_call(slow_call);
4926 // this->control() comes from set_results_for_java_call
4927 result_reg->set_req(_slow_path, control());
4928 result_val->set_req(_slow_path, slow_result);
4929 result_io ->set_req(_slow_path, i_o());
4930 result_mem->set_req(_slow_path, reset_memory());
4931 }
4932
4933 set_control(normal_ctl);
4934 if (!stopped()) {
4935 // Normal case: The array type has been cached in the java.lang.Class.
4936 // The following call works fine even if the array type is polymorphic.
4937 // It could be a dynamic mix of int[], boolean[], Object[], etc.
4938
4939 klass_node = load_default_refined_array_klass(klass_node);
4940
4941 Node* obj = new_array(klass_node, count_val, 0); // no arguments to push
4942 result_reg->init_req(_normal_path, control());
4943 result_val->init_req(_normal_path, obj);
4944 result_io ->init_req(_normal_path, i_o());
4945 result_mem->init_req(_normal_path, reset_memory());
4946
4947 if (uninitialized) {
4948 // Mark the allocation so that zeroing is skipped
4949 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(obj);
4950 alloc->maybe_set_complete(&_gvn);
4951 }
4952 }
4953
4954 // Return the combined state.
4955 set_i_o( _gvn.transform(result_io) );
4956 set_all_memory( _gvn.transform(result_mem));
4957
4958 C->set_has_split_ifs(true); // Has chance for split-if optimization
4959 set_result(result_reg, result_val);
4960 return true;
4961 }
4962
4963 //----------------------inline_native_getLength--------------------------
4964 // public static native int java.lang.reflect.Array.getLength(Object array);
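// Pseudo-code model:
//
//   if (array == null) throw NPE;
//   if (array is not an array) deopt();  // the interpreter throws IllegalArgumentException
//   return array->length();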
4965 bool LibraryCallKit::inline_native_getLength() {
4966 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
4967
4968 Node* array = null_check(argument(0));
4969 // If array is dead, only null-path is taken.
4970 if (stopped()) return true;
4971
4972 // Deoptimize if it is a non-array.
4973 Node* non_array = generate_non_array_guard(load_object_klass(array), nullptr, &array);
4974
4975 if (non_array != nullptr) {
4976 PreserveJVMState pjvms(this);
4977 set_control(non_array);
4978 uncommon_trap(Deoptimization::Reason_intrinsic,
4979 Deoptimization::Action_maybe_recompile);
4980 }
4981
4982 // If control is dead, only non-array-path is taken.
4983 if (stopped()) return true;
4984
  // This works fine even if the array type is polymorphic.
4986 // It could be a dynamic mix of int[], boolean[], Object[], etc.
4987 Node* result = load_array_length(array);
4988
4989 C->set_has_split_ifs(true); // Has chance for split-if optimization
4990 set_result(result);
4991 return true;
4992 }
4993
4994 //------------------------inline_array_copyOf----------------------------
4995 // public static <T,U> T[] java.util.Arrays.copyOf( U[] original, int newLength, Class<? extends T[]> newType);
4996 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from, int to, Class<? extends T[]> newType);
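// A pseudo-code sketch of the guards generated below (any failed guard deopts
// and re-executes Arrays.copyOf in the interpreter):
//
//   if (start < 0 || end < 0 || end - start < 0) deopt();  // bad range
//   if (start > original.length) deopt();
//   if (src or dst array could be flat or null-free) deopt();  // TODO 8251971
//   int length = end - start;
//   int moved  = min(original.length - start, length);  // elements to copy
//   newcopy = new_array(<klass for newType>, length);
//   <arraycopy original[start, start + moved) into newcopy[0, moved)>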
4997 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
4998 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
4999
5000 // Get the arguments.
  Node* original          = argument(0);
  Node* start             = is_copyOfRange ? argument(1) : intcon(0);
  Node* end               = is_copyOfRange ? argument(2) : argument(1);
  Node* array_type_mirror = is_copyOfRange ? argument(3) : argument(2);
5005
5006 Node* newcopy = nullptr;
5007
5008 // Set the original stack and the reexecute bit for the interpreter to reexecute
5009 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
5010 { PreserveReexecuteState preexecs(this);
5011 jvms()->set_should_reexecute(true);
5012
5013 array_type_mirror = null_check(array_type_mirror);
5014 original = null_check(original);
5015
5016 // Check if a null path was taken unconditionally.
5017 if (stopped()) return true;
5018
5019 Node* orig_length = load_array_length(original);
5020
5021 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
5022 klass_node = null_check(klass_node);
5023
5024 RegionNode* bailout = new RegionNode(1);
5025 record_for_igvn(bailout);
5026
5027 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
5028 // Bail out if that is so.
5029 // Inline type array may have object field that would require a
5030 // write barrier. Conservatively, go to slow path.
5031 // TODO 8251971: Optimize for the case when flat src/dst are later found
5032 // to not contain oops (i.e., move this check to the macro expansion phase).
5033 // TODO 8382226: Revisit for flat abstract value class arrays
5034 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5035 const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
5036 const TypeKlassPtr* tklass = _gvn.type(klass_node)->is_klassptr();
5037 bool exclude_flat = UseArrayFlattening && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
5038 // Can src array be flat and contain oops?
5039 (orig_t == nullptr || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
5040 // Can dest array be flat and contain oops?
5041 tklass->can_be_inline_array() && (!tklass->is_flat() || tklass->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->as_inline_klass()->contains_oops());
5042 Node* not_objArray = exclude_flat ? generate_non_refArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
5043
5044 Node* refined_klass_node = load_default_refined_array_klass(klass_node, /* type_array_guard= */ false);
5045
5046 if (not_objArray != nullptr) {
5047 // Improve the klass node's type from the new optimistic assumption:
5048 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
5049 bool not_flat = !UseArrayFlattening;
5050 bool not_null_free = !Arguments::is_valhalla_enabled();
5051 const Type* akls = TypeAryKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0), Type::trust_interfaces, not_flat, not_null_free, false, false, not_flat, true);
5052 Node* cast = new CastPPNode(control(), refined_klass_node, akls);
5053 refined_klass_node = _gvn.transform(cast);
5054 }
5055
5056 // Bail out if either start or end is negative.
5057 generate_negative_guard(start, bailout, &start);
5058 generate_negative_guard(end, bailout, &end);
5059
5060 Node* length = end;
5061 if (_gvn.type(start) != TypeInt::ZERO) {
5062 length = _gvn.transform(new SubINode(end, start));
5063 }
5064
5065 // Bail out if length is negative (i.e., if start > end).
5066     // Without this, new_array would throw a
5067     // NegativeArraySizeException, but an IllegalArgumentException is what
5068     // should be thrown.
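    // (e.g., Arrays.copyOfRange(a, 3, 1) must throw IllegalArgumentException
    //  per the Arrays.copyOfRange specification.)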
5069 generate_negative_guard(length, bailout, &length);
5070
5071 // Handle inline type arrays
5072 // TODO 8251971 This is too strong
5073 generate_fair_guard(flat_array_test(load_object_klass(original)), bailout);
5074 generate_fair_guard(flat_array_test(refined_klass_node), bailout);
5075 generate_fair_guard(null_free_array_test(original), bailout);
5076
5077 // Bail out if start is larger than the original length
5078 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
5079 generate_negative_guard(orig_tail, bailout, &orig_tail);
5080
5081 if (bailout->req() > 1) {
5082 PreserveJVMState pjvms(this);
5083 set_control(_gvn.transform(bailout));
5084 uncommon_trap(Deoptimization::Reason_intrinsic,
5085 Deoptimization::Action_maybe_recompile);
5086 }
5087
5088 if (!stopped()) {
5089 // How many elements will we copy from the original?
5090 // The answer is MinI(orig_tail, length).
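      // (Worked example: orig_length == 10, start == 8, end == 15 gives
      //  orig_tail == 2 and length == 7, so moved == 2 and the new array's
      //  remaining slots keep their default values.)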
5091 Node* moved = _gvn.transform(new MinINode(orig_tail, length));
5092
5093 // Generate a direct call to the right arraycopy function(s).
5094 // We know the copy is disjoint but we might not know if the
5095 // oop stores need checking.
5096 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
5097 // This will fail a store-check if x contains any non-nulls.
5098
5099       // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
5100       // loads/stores but it is legal only if we're sure the
5101       // Arrays.copyOf would succeed. So we need all input arguments
5102       // to the copyOf to be validated, including that the copy to the
5103       // new array won't trigger an ArrayStoreException. That subtype
5104       // check can be optimized if we know something about the type of
5105       // the input array from type speculation.
5106 if (_gvn.type(klass_node)->singleton()) {
5107 const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
5108 const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
5109
5110 int test = C->static_subtype_check(superk, subk);
5111 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
5112 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
5113 if (t_original->speculative_type() != nullptr) {
5114 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
5115 }
5116 }
5117 }
5118
5119 bool validated = false;
5120 // Reason_class_check rather than Reason_intrinsic because we
5121 // want to intrinsify even if this traps.
5122 if (!too_many_traps(Deoptimization::Reason_class_check)) {
5123 Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
5124
5125 if (not_subtype_ctrl != top()) {
5126 PreserveJVMState pjvms(this);
5127 set_control(not_subtype_ctrl);
5128 uncommon_trap(Deoptimization::Reason_class_check,
5129 Deoptimization::Action_make_not_entrant);
5130 assert(stopped(), "Should be stopped");
5131 }
5132 validated = true;
5133 }
5134
5135 if (!stopped()) {
5136 newcopy = new_array(refined_klass_node, length, 0); // no arguments to push
5137
5138 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
5139 load_object_klass(original), klass_node);
5140 if (!is_copyOfRange) {
5141 ac->set_copyof(validated);
5142 } else {
5143 ac->set_copyofrange(validated);
5144 }
5145 Node* n = _gvn.transform(ac);
5146 if (n == ac) {
5147 ac->connect_outputs(this);
5148 } else {
5149 assert(validated, "shouldn't transform if all arguments not validated");
5150 set_all_memory(n);
5151 }
5152 }
5153 }
5154 } // original reexecute is set back here
5155
5156 C->set_has_split_ifs(true); // Has chance for split-if optimization
5157 if (!stopped()) {
5158 set_result(newcopy);
5159 }
5160 return true;
5161 }
5162
5163
5164 //----------------------generate_virtual_guard---------------------------
5165 // Helper for hashCode and clone. Peeks inside the vtable to avoid a call.
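// Illustratively, for a receiver of class K and a method at vtable index i, this
// emits a load of K's vtable entry i (K->_vtable[i]._method in pseudo-notation)
// and a pointer compare against the expected Method* constant; a mismatch routes
// control to slow_region.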
5166 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
5167 RegionNode* slow_region) {
5168 ciMethod* method = callee();
5169 int vtable_index = method->vtable_index();
5170 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
5171 "bad index %d", vtable_index);
5172 // Get the Method* out of the appropriate vtable entry.
5173 int entry_offset = in_bytes(Klass::vtable_start_offset()) +
5174 vtable_index*vtableEntry::size_in_bytes() +
5175 in_bytes(vtableEntry::method_offset());
5176 Node* entry_addr = off_heap_plus_addr(obj_klass, entry_offset);
5177 Node* target_call = make_load(nullptr, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
5178
5179 // Compare the target method with the expected method (e.g., Object.hashCode).
5180 const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
5181
5182 Node* native_call = makecon(native_call_addr);
5183 Node* chk_native = _gvn.transform(new CmpPNode(target_call, native_call));
5184 Node* test_native = _gvn.transform(new BoolNode(chk_native, BoolTest::ne));
5185
5186 return generate_slow_guard(test_native, slow_region);
5187 }
5188
5189 //-----------------------generate_method_call----------------------------
5190 // Use generate_method_call to make a slow-call to the real
5191 // method if the fast path fails. An alternative would be to
5192 // use a stub like OptoRuntime::slow_arraycopy_Java.
5193 // This only works for expanding the current library call,
5194 // not another intrinsic. (E.g., don't use this for making an
5195 // arraycopy call inside of the copyOf intrinsic.)
5196 CallJavaNode*
5197 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
5198 // When compiling the intrinsic method itself, do not use this technique.
5199 guarantee(callee() != C->method(), "cannot make slow-call to self");
5200
5201 ciMethod* method = callee();
5202 // ensure the JVMS we have will be correct for this call
5203 guarantee(method_id == method->intrinsic_id(), "must match");
5204
5205 const TypeFunc* tf = TypeFunc::make(method);
5206 if (res_not_null) {
5207 assert(tf->return_type() == T_OBJECT, "");
5208 const TypeTuple* range = tf->range_cc();
5209 const Type** fields = TypeTuple::fields(range->cnt());
5210 fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
5211 const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
5212 tf = TypeFunc::make(tf->domain_cc(), new_range);
5213 }
5214 CallJavaNode* slow_call;
5215 if (is_static) {
5216 assert(!is_virtual, "");
5217 slow_call = new CallStaticJavaNode(C, tf,
5218 SharedRuntime::get_resolve_static_call_stub(), method);
5219 } else if (is_virtual) {
5220 assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
5221 int vtable_index = Method::invalid_vtable_index;
5222 if (UseInlineCaches) {
5223 // Suppress the vtable call
5224 } else {
5225       // hashCode and clone are not miranda methods,
5226 // so the vtable index is fixed.
5227 // No need to use the linkResolver to get it.
5228 vtable_index = method->vtable_index();
5229 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
5230 "bad index %d", vtable_index);
5231 }
5232 slow_call = new CallDynamicJavaNode(tf,
5233 SharedRuntime::get_resolve_virtual_call_stub(),
5234 method, vtable_index);
5235 } else { // neither virtual nor static: opt_virtual
5236 assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
5237 slow_call = new CallStaticJavaNode(C, tf,
5238 SharedRuntime::get_resolve_opt_virtual_call_stub(), method);
5239 slow_call->set_optimized_virtual(true);
5240 }
5241 if (CallGenerator::is_inlined_method_handle_intrinsic(this->method(), bci(), callee())) {
5242 // To be able to issue a direct call (optimized virtual or virtual)
5243 // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
5244 // about the method being invoked should be attached to the call site to
5245 // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
5246 slow_call->set_override_symbolic_info(true);
5247 }
5248 set_arguments_for_java_call(slow_call);
5249 set_edges_for_java_call(slow_call);
5250 return slow_call;
5251 }
5252
5253
5254 /**
5255 * Build special case code for calls to hashCode on an object. This call may
5256 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
5257 * slightly different code.
5258 */
5259 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
5260 assert(is_static == callee()->is_static(), "correct intrinsic selection");
5261 assert(!(is_virtual && is_static), "either virtual, special, or static");
5262
5263 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
5264
5265 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5266 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
5267 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
5268 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5269 Node* obj = argument(0);
5270
5271 // Don't intrinsify hashcode on inline types for now.
5272 // The "is locked" runtime check also subsumes the inline type check (as inline types cannot be locked) and goes to the slow path.
5273 if (gvn().type(obj)->is_inlinetypeptr()) {
5274 return false;
5275 }
5276
5277 if (!is_static) {
5278 // Check for hashing null object
5279 obj = null_check_receiver();
5280 if (stopped()) return true; // unconditionally null
5281 result_reg->init_req(_null_path, top());
5282 result_val->init_req(_null_path, top());
5283 } else {
5284 // Do a null check, and return zero if null.
5285 // System.identityHashCode(null) == 0
5286 Node* null_ctl = top();
5287 obj = null_check_oop(obj, &null_ctl);
5288 result_reg->init_req(_null_path, null_ctl);
5289 result_val->init_req(_null_path, _gvn.intcon(0));
5290 }
5291
5292 // Unconditionally null? Then return right away.
5293 if (stopped()) {
5294 set_control( result_reg->in(_null_path));
5295 if (!stopped())
5296 set_result(result_val->in(_null_path));
5297 return true;
5298 }
5299
5300 // We only go to the fast case code if we pass a number of guards. The
5301 // paths which do not pass are accumulated in the slow_region.
5302 RegionNode* slow_region = new RegionNode(1);
5303 record_for_igvn(slow_region);
5304
5305 // If this is a virtual call, we generate a funny guard. We pull out
5306 // the vtable entry corresponding to hashCode() from the target object.
5307 // If the target method which we are calling happens to be the native
5308 // Object hashCode() method, we pass the guard. We do not need this
5309   // guard for non-virtual calls -- the callee is known to be the native
5310 // Object hashCode().
5311 if (is_virtual) {
5312 // After null check, get the object's klass.
5313 Node* obj_klass = load_object_klass(obj);
5314 generate_virtual_guard(obj_klass, slow_region);
5315 }
5316
5317 // Get the header out of the object, use LoadMarkNode when available
5318 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
5319 // The control of the load must be null. Otherwise, the load can move before
5320 // the null check after castPP removal.
5321 Node* no_ctrl = nullptr;
5322 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
5323
5324 if (!UseObjectMonitorTable) {
5325 // Test the header to see if it is safe to read w.r.t. locking.
5326     // We cannot use the inline type mask as this may check bits that are overridden
5327     // by an object monitor's pointer when inflating the lock.
5328 Node *lock_mask = _gvn.MakeConX(markWord::lock_mask_in_place);
5329 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
5330 Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
5331 Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
5332 Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
5333
5334 generate_slow_guard(test_monitor, slow_region);
5335 }
5336
5337 // Get the hash value and check to see that it has been properly assigned.
5338 // We depend on hash_mask being at most 32 bits and avoid the use of
5339 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
5340 // vm: see markWord.hpp.
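  // In effect, the nodes below compute: hash = ((int)(header >>> hash_shift)) & hash_mask,
  // which is then compared against markWord::no_hash to see if a hash was installed.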
5341 Node *hash_mask = _gvn.intcon(markWord::hash_mask);
5342 Node *hash_shift = _gvn.intcon(markWord::hash_shift);
5343 Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
5344   // This hack lets the hash bits live anywhere in the mark word now, as long
5345   // as the shift drops the relevant bits into the low 32 bits. Note that
5346   // the Java spec says that hashCode is an int, so there's no point in capturing
5347   // an 'X'-sized hash code (32 bits in a 32-bit build or 64 bits in a 64-bit build).
5348 hshifted_header = ConvX2I(hshifted_header);
5349 Node *hash_val = _gvn.transform(new AndINode(hshifted_header, hash_mask));
5350
5351 Node *no_hash_val = _gvn.intcon(markWord::no_hash);
5352 Node *chk_assigned = _gvn.transform(new CmpINode( hash_val, no_hash_val));
5353 Node *test_assigned = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
5354
5355 generate_slow_guard(test_assigned, slow_region);
5356
5357 Node* init_mem = reset_memory();
5358 // fill in the rest of the null path:
5359 result_io ->init_req(_null_path, i_o());
5360 result_mem->init_req(_null_path, init_mem);
5361
5362 result_val->init_req(_fast_path, hash_val);
5363 result_reg->init_req(_fast_path, control());
5364 result_io ->init_req(_fast_path, i_o());
5365 result_mem->init_req(_fast_path, init_mem);
5366
5367 // Generate code for the slow case. We make a call to hashCode().
5368 set_control(_gvn.transform(slow_region));
5369 if (!stopped()) {
5370 // No need for PreserveJVMState, because we're using up the present state.
5371 set_all_memory(init_mem);
5372 vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
5373 CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static, false);
5374 Node* slow_result = set_results_for_java_call(slow_call);
5375 // this->control() comes from set_results_for_java_call
5376 result_reg->init_req(_slow_path, control());
5377 result_val->init_req(_slow_path, slow_result);
5378 result_io ->set_req(_slow_path, i_o());
5379 result_mem ->set_req(_slow_path, reset_memory());
5380 }
5381
5382 // Return the combined state.
5383 set_i_o( _gvn.transform(result_io) );
5384 set_all_memory( _gvn.transform(result_mem));
5385
5386 set_result(result_reg, result_val);
5387 return true;
5388 }
5389
5390 //---------------------------inline_native_getClass----------------------------
5391 // public final native Class<?> java.lang.Object.getClass();
5392 //
5393 // Build special case code for calls to getClass on an object.
5394 bool LibraryCallKit::inline_native_getClass() {
5395 Node* obj = argument(0);
5396 if (obj->is_InlineType()) {
5397 const Type* t = _gvn.type(obj);
5398 if (t->maybe_null()) {
5399 null_check(obj);
5400 }
5401 set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
5402 return true;
5403 }
5404 obj = null_check_receiver();
5405 if (stopped()) return true;
5406 set_result(load_mirror_from_klass(load_object_klass(obj)));
5407 return true;
5408 }
5409
5410 //-----------------inline_native_Reflection_getCallerClass---------------------
5411 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
5412 //
5413 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
5414 //
5415 // NOTE: This code must perform the same logic as JVM_GetCallerClass
5416 // in that it must skip particular security frames and checks for
5417 // caller sensitive methods.
5418 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
5419 #ifndef PRODUCT
5420 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
5421 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
5422 }
5423 #endif
5424
5425 if (!jvms()->has_method()) {
5426 #ifndef PRODUCT
5427 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
5428 tty->print_cr(" Bailing out because intrinsic was inlined at top level");
5429 }
5430 #endif
5431 return false;
5432 }
5433
5434 // Walk back up the JVM state to find the caller at the required
5435 // depth.
5436 JVMState* caller_jvms = jvms();
5437
5438 // Cf. JVM_GetCallerClass
5439 // NOTE: Start the loop at depth 1 because the current JVM state does
5440 // not include the Reflection.getCallerClass() frame.
5441 for (int n = 1; caller_jvms != nullptr; caller_jvms = caller_jvms->caller(), n++) {
5442 ciMethod* m = caller_jvms->method();
5443 switch (n) {
5444 case 0:
5445 fatal("current JVM state does not include the Reflection.getCallerClass frame");
5446 break;
5447 case 1:
5448 // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
5449 if (!m->caller_sensitive()) {
5450 #ifndef PRODUCT
5451 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
5452 tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n);
5453 }
5454 #endif
5455 return false; // bail-out; let JVM_GetCallerClass do the work
5456 }
5457 break;
5458 default:
5459 if (!m->is_ignored_by_security_stack_walk()) {
5460 // We have reached the desired frame; return the holder class.
5461 // Acquire method holder as java.lang.Class and push as constant.
5462 ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
5463 ciInstance* caller_mirror = caller_klass->java_mirror();
5464 set_result(makecon(TypeInstPtr::make(caller_mirror)));
5465
5466 #ifndef PRODUCT
5467 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
5468 tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
5469 tty->print_cr(" JVM state at this point:");
5470 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
5471 ciMethod* m = jvms()->of_depth(i)->method();
5472 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
5473 }
5474 }
5475 #endif
5476 return true;
5477 }
5478 break;
5479 }
5480 }
5481
5482 #ifndef PRODUCT
5483 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
5484 tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
5485 tty->print_cr(" JVM state at this point:");
5486 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
5487 ciMethod* m = jvms()->of_depth(i)->method();
5488 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
5489 }
5490 }
5491 #endif
5492
5493 return false; // bail-out; let JVM_GetCallerClass do the work
5494 }
5495
5496 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
5497 Node* arg = argument(0);
5498 Node* result = nullptr;
5499
5500 switch (id) {
5501 case vmIntrinsics::_floatToRawIntBits: result = new MoveF2INode(arg); break;
5502 case vmIntrinsics::_intBitsToFloat: result = new MoveI2FNode(arg); break;
5503 case vmIntrinsics::_doubleToRawLongBits: result = new MoveD2LNode(arg); break;
5504 case vmIntrinsics::_longBitsToDouble: result = new MoveL2DNode(arg); break;
5505 case vmIntrinsics::_floatToFloat16: result = new ConvF2HFNode(arg); break;
5506 case vmIntrinsics::_float16ToFloat: result = new ConvHF2FNode(arg); break;
5507
5508 case vmIntrinsics::_doubleToLongBits: {
5509 // two paths (plus control) merge in a wood
5510 RegionNode *r = new RegionNode(3);
5511 Node *phi = new PhiNode(r, TypeLong::LONG);
5512
5513 Node *cmpisnan = _gvn.transform(new CmpDNode(arg, arg));
5514 // Build the boolean node
5515 Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
5516
5517 // Branch either way.
5518 // NaN case is less traveled, which makes all the difference.
5519 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
5520 Node *opt_isnan = _gvn.transform(ifisnan);
5521 assert( opt_isnan->is_If(), "Expect an IfNode");
5522 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
5523 Node *iftrue = _gvn.transform(new IfTrueNode(opt_ifisnan));
5524
5525 set_control(iftrue);
5526
5527 static const jlong nan_bits = CONST64(0x7ff8000000000000);
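      // Double.doubleToLongBits canonicalizes every NaN input to this single bit
      // pattern; only doubleToRawLongBits preserves a NaN's raw payload bits.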
5528 Node *slow_result = longcon(nan_bits); // return NaN
5529 phi->init_req(1, _gvn.transform( slow_result ));
5530 r->init_req(1, iftrue);
5531
5532 // Else fall through
5533 Node *iffalse = _gvn.transform(new IfFalseNode(opt_ifisnan));
5534 set_control(iffalse);
5535
5536 phi->init_req(2, _gvn.transform(new MoveD2LNode(arg)));
5537 r->init_req(2, iffalse);
5538
5539 // Post merge
5540 set_control(_gvn.transform(r));
5541 record_for_igvn(r);
5542
5543 C->set_has_split_ifs(true); // Has chance for split-if optimization
5544 result = phi;
5545 assert(result->bottom_type()->isa_long(), "must be");
5546 break;
5547 }
5548
5549 case vmIntrinsics::_floatToIntBits: {
5550 // two paths (plus control) merge in a wood
5551 RegionNode *r = new RegionNode(3);
5552 Node *phi = new PhiNode(r, TypeInt::INT);
5553
5554 Node *cmpisnan = _gvn.transform(new CmpFNode(arg, arg));
5555 // Build the boolean node
5556 Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
5557
5558 // Branch either way.
5559 // NaN case is less traveled, which makes all the difference.
5560 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
5561 Node *opt_isnan = _gvn.transform(ifisnan);
5562 assert( opt_isnan->is_If(), "Expect an IfNode");
5563 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
5564 Node *iftrue = _gvn.transform(new IfTrueNode(opt_ifisnan));
5565
5566 set_control(iftrue);
5567
5568 static const jint nan_bits = 0x7fc00000;
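      // This is the canonical NaN pattern that Float.floatToIntBits is specified
      // to return for every NaN input.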
5569 Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN
5570 phi->init_req(1, _gvn.transform( slow_result ));
5571 r->init_req(1, iftrue);
5572
5573 // Else fall through
5574 Node *iffalse = _gvn.transform(new IfFalseNode(opt_ifisnan));
5575 set_control(iffalse);
5576
5577 phi->init_req(2, _gvn.transform(new MoveF2INode(arg)));
5578 r->init_req(2, iffalse);
5579
5580 // Post merge
5581 set_control(_gvn.transform(r));
5582 record_for_igvn(r);
5583
5584 C->set_has_split_ifs(true); // Has chance for split-if optimization
5585 result = phi;
5586 assert(result->bottom_type()->isa_int(), "must be");
5587 break;
5588 }
5589
5590 default:
5591 fatal_unexpected_iid(id);
5592 break;
5593 }
5594 set_result(_gvn.transform(result));
5595 return true;
5596 }
5597
5598 bool LibraryCallKit::inline_fp_range_check(vmIntrinsics::ID id) {
5599 Node* arg = argument(0);
5600 Node* result = nullptr;
5601
5602 switch (id) {
5603 case vmIntrinsics::_floatIsInfinite:
5604 result = new IsInfiniteFNode(arg);
5605 break;
5606 case vmIntrinsics::_floatIsFinite:
5607 result = new IsFiniteFNode(arg);
5608 break;
5609 case vmIntrinsics::_doubleIsInfinite:
5610 result = new IsInfiniteDNode(arg);
5611 break;
5612 case vmIntrinsics::_doubleIsFinite:
5613 result = new IsFiniteDNode(arg);
5614 break;
5615 default:
5616 fatal_unexpected_iid(id);
5617 break;
5618 }
5619 set_result(_gvn.transform(result));
5620 return true;
5621 }
5622
5623 //----------------------inline_unsafe_copyMemory-------------------------
5624 // public native void Unsafe.copyMemory0(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
5625
5626 static bool has_wide_mem(PhaseGVN& gvn, Node* addr, Node* base) {
5627 const TypeAryPtr* addr_t = gvn.type(addr)->isa_aryptr();
5628 const Type* base_t = gvn.type(base);
5629
5630 bool in_native = (base_t == TypePtr::NULL_PTR);
5631 bool in_heap = !TypePtr::NULL_PTR->higher_equal(base_t);
5632 bool is_mixed = !in_heap && !in_native;
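  // Examples: a null base means a pure off-heap access; a base statically known
  // to be a (non-null) primitive array is on-heap; a base of unknown nullness
  // could be either, hence "mixed".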
5633
5634 if (is_mixed) {
5635 return true; // mixed accesses can touch both on-heap and off-heap memory
5636 }
5637 if (in_heap) {
5638 bool is_prim_array = (addr_t != nullptr) && (addr_t->elem() != Type::BOTTOM);
5639 if (!is_prim_array) {
5640       // Though Unsafe.copyMemory() ensures at runtime for on-heap accesses that base is a primitive array,
5641       // there's not enough type information available to determine the proper memory slice for it.
5642 return true;
5643 }
5644 }
5645 return false;
5646 }
5647
5648 bool LibraryCallKit::inline_unsafe_copyMemory() {
5649 if (callee()->is_static()) return false; // caller must have the capability!
5650 null_check_receiver(); // null-check receiver
5651 if (stopped()) return true;
5652
5653 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
5654
5655 Node* src_base = argument(1); // type: oop
5656 Node* src_off = ConvL2X(argument(2)); // type: long
5657 Node* dst_base = argument(4); // type: oop
5658 Node* dst_off = ConvL2X(argument(5)); // type: long
5659 Node* size = ConvL2X(argument(7)); // type: long
5660
5661 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
5662 "fieldOffset must be byte-scaled");
5663
5664 Node* src_addr = make_unsafe_address(src_base, src_off);
5665 Node* dst_addr = make_unsafe_address(dst_base, dst_off);
5666
5667 Node* thread = _gvn.transform(new ThreadLocalNode());
5668 Node* doing_unsafe_access_addr = off_heap_plus_addr(thread, in_bytes(JavaThread::doing_unsafe_access_offset()));
5669 BasicType doing_unsafe_access_bt = T_BYTE;
5670 assert((sizeof(bool) * CHAR_BIT) == 8, "not implemented");
5671
5672 // update volatile field
5673 store_to_memory(control(), doing_unsafe_access_addr, intcon(1), doing_unsafe_access_bt, MemNode::unordered);
5674
5675 int flags = RC_LEAF | RC_NO_FP;
5676
5677 const TypePtr* dst_type = TypePtr::BOTTOM;
5678
5679 // Adjust memory effects of the runtime call based on input values.
5680 if (!has_wide_mem(_gvn, src_addr, src_base) &&
5681 !has_wide_mem(_gvn, dst_addr, dst_base)) {
5682 dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5683
5684 const TypePtr* src_type = _gvn.type(src_addr)->is_ptr();
5685 if (C->get_alias_index(src_type) == C->get_alias_index(dst_type)) {
5686 flags |= RC_NARROW_MEM; // narrow in memory
5687 }
5688 }
5689
5690 // Call it. Note that the length argument is not scaled.
5691 make_runtime_call(flags,
5692 OptoRuntime::fast_arraycopy_Type(),
5693 StubRoutines::unsafe_arraycopy(),
5694 "unsafe_arraycopy",
5695 dst_type,
5696 src_addr, dst_addr, size XTOP);
5697
5698 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, MemNode::unordered);
5699
5700 return true;
5701 }
5702
5703 // unsafe_setmemory(void *base, ulong offset, size_t length, char fill_value);
5704 // Fill 'length' bytes starting from 'base[offset]' with 'fill_value'
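// Illustrative Java-level use (hypothetical values): UNSAFE.setMemory(byteArray,
// Unsafe.ARRAY_BYTE_BASE_OFFSET, 16, (byte) 0) fills 16 bytes of the array with zero.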
5705 bool LibraryCallKit::inline_unsafe_setMemory() {
5706 if (callee()->is_static()) return false; // caller must have the capability!
5707 null_check_receiver(); // null-check receiver
5708 if (stopped()) return true;
5709
5710 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
5711
5712 Node* dst_base = argument(1); // type: oop
5713 Node* dst_off = ConvL2X(argument(2)); // type: long
5714 Node* size = ConvL2X(argument(4)); // type: long
5715 Node* byte = argument(6); // type: byte
5716
5717 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
5718 "fieldOffset must be byte-scaled");
5719
5720 Node* dst_addr = make_unsafe_address(dst_base, dst_off);
5721
5722 Node* thread = _gvn.transform(new ThreadLocalNode());
5723 Node* doing_unsafe_access_addr = off_heap_plus_addr(thread, in_bytes(JavaThread::doing_unsafe_access_offset()));
5724 BasicType doing_unsafe_access_bt = T_BYTE;
5725 assert((sizeof(bool) * CHAR_BIT) == 8, "not implemented");
5726
5727 // update volatile field
5728 store_to_memory(control(), doing_unsafe_access_addr, intcon(1), doing_unsafe_access_bt, MemNode::unordered);
5729
5730 int flags = RC_LEAF | RC_NO_FP;
5731
5732 const TypePtr* dst_type = TypePtr::BOTTOM;
5733
5734 // Adjust memory effects of the runtime call based on input values.
5735 if (!has_wide_mem(_gvn, dst_addr, dst_base)) {
5736 dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5737
5738 flags |= RC_NARROW_MEM; // narrow in memory
5739 }
5740
5741 // Call it. Note that the length argument is not scaled.
5742 make_runtime_call(flags,
5743 OptoRuntime::unsafe_setmemory_Type(),
5744 StubRoutines::unsafe_setmemory(),
5745 "unsafe_setmemory",
5746 dst_type,
5747 dst_addr, size XTOP, byte);
5748
5749 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, MemNode::unordered);
5750
5751 return true;
5752 }
5753
5754 #undef XTOP
5755
5756 //------------------------copy_to_clone----------------------------------
5757 // Helper function for inline_native_clone.
5758 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5759 assert(obj_size != nullptr, "");
5760 Node* raw_obj = alloc_obj->in(1);
5761 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5762
5763 AllocateNode* alloc = nullptr;
5764 if (ReduceBulkZeroing &&
5765 // If we are implementing an array clone without knowing its source type
5766 // (can happen when compiling the array-guarded branch of a reflective
5767 // Object.clone() invocation), initialize the array within the allocation.
5768 // This is needed because some GCs (e.g. ZGC) might fall back in this case
5769 // to a runtime clone call that assumes fully initialized source arrays.
5770 (!is_array || obj->get_ptr_type()->isa_aryptr() != nullptr)) {
5771 // We will be completely responsible for initializing this object -
5772 // mark Initialize node as complete.
5773 alloc = AllocateNode::Ideal_allocation(alloc_obj);
5774     // The object was just allocated - there should not be any stores!
5775 guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
5776 // Mark as complete_with_arraycopy so that on AllocateNode
5777 // expansion, we know this AllocateNode is initialized by an array
5778 // copy and a StoreStore barrier exists after the array copy.
5779 alloc->initialization()->set_complete_with_arraycopy();
5780 }
5781
5782 Node* size = _gvn.transform(obj_size);
5783 access_clone(obj, alloc_obj, size, is_array);
5784
5785 // Do not let reads from the cloned object float above the arraycopy.
5786 if (alloc != nullptr) {
5787 // Do not let stores that initialize this object be reordered with
5788 // a subsequent store that would make this object accessible by
5789 // other threads.
5790 // Record what AllocateNode this StoreStore protects so that
5791 // escape analysis can go from the MemBarStoreStoreNode to the
5792 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
5793 // based on the escape status of the AllocateNode.
5794 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
5795 } else {
5796 insert_mem_bar(Op_MemBarCPUOrder);
5797 }
5798 }
5799
5800 //------------------------inline_native_clone----------------------------
5801 // protected native Object java.lang.Object.clone();
5802 //
5803 // Here are the simple edge cases:
5804 // null receiver => normal trap
5805 // virtual and clone was overridden => slow path to out-of-line clone
5806 // not cloneable or finalizer => slow path to out-of-line Object.clone
5807 //
5808 // The general case has two steps, allocation and copying.
5809 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5810 //
5811 // Copying also has two cases, oop arrays and everything else.
5812 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5813 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5814 //
5815 // These steps fold up nicely if and when the cloned object's klass
5816 // can be sharply typed as an object array, a type array, or an instance.
5817 //
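// For example, cloning an int[] takes the _array_path (new_array plus a tight
// copy loop), while cloning an Object[] may take the _objArray_path when the GC
// requires barriers, so the copy goes through arrayof_oop_arraycopy.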
5818 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5819 PhiNode* result_val;
5820
5821 // Set the reexecute bit for the interpreter to reexecute
5822 // the bytecode that invokes Object.clone if deoptimization happens.
5823 { PreserveReexecuteState preexecs(this);
5824 jvms()->set_should_reexecute(true);
5825
5826 Node* obj = argument(0);
5827 obj = null_check_receiver();
5828 if (stopped()) return true;
5829
5830 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5831 if (obj_type->is_inlinetypeptr()) {
5832 // If the object to clone is an inline type, we can simply return it (i.e. a nop) since inline types have
5833 // no identity.
5834 set_result(obj);
5835 return true;
5836 }
5837
5838 // If we are going to clone an instance, we need its exact type to
5839 // know the number and types of fields to convert the clone to
5840 // loads/stores. Maybe a speculative type can help us.
5841 if (!obj_type->klass_is_exact() &&
5842 obj_type->speculative_type() != nullptr &&
5843 obj_type->speculative_type()->is_instance_klass() &&
5844 !obj_type->speculative_type()->is_inlinetype()) {
5845 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5846 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5847 !spec_ik->has_injected_fields()) {
5848 if (!obj_type->isa_instptr() ||
5849 obj_type->is_instptr()->instance_klass()->has_subklass()) {
5850 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5851 }
5852 }
5853 }
5854
5855 // Conservatively insert a memory barrier on all memory slices.
5856 // Do not let writes into the original float below the clone.
5857 insert_mem_bar(Op_MemBarCPUOrder);
5858
5859 // paths into result_reg:
5860 enum {
5861 _slow_path = 1, // out-of-line call to clone method (virtual or not)
5862 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
5863 _array_path, // plain array allocation, plus arrayof_long_arraycopy
5864 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
5865 PATH_LIMIT
5866 };
5867 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5868 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5869 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
5870 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5871 record_for_igvn(result_reg);
5872
5873 Node* obj_klass = load_object_klass(obj);
5874 // We only go to the fast case code if we pass a number of guards.
5875 // The paths which do not pass are accumulated in the slow_region.
5876 RegionNode* slow_region = new RegionNode(1);
5877 record_for_igvn(slow_region);
5878
5879 Node* array_obj = obj;
5880 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr, &array_obj);
5881 if (array_ctl != nullptr) {
5882 // It's an array.
5883 PreserveJVMState pjvms(this);
5884 set_control(array_ctl);
5885
5886 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5887 const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
5888 if (UseArrayFlattening && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
5889 obj_type->can_be_inline_array() &&
5890 (ary_ptr == nullptr || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
5891         // A flat inline type array may have object fields that would require a
5892         // write barrier. Conservatively, go to the slow path.
5893 generate_fair_guard(flat_array_test(obj_klass), slow_region);
5894 }
5895
5896 if (!stopped()) {
5897 Node* obj_length = load_array_length(array_obj);
5898 Node* array_size = nullptr; // Size of the array without object alignment padding.
5899 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5900
5901 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5902 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5903 // If it is an oop array, it requires very special treatment,
5904 // because gc barriers are required when accessing the array.
5905 Node* is_obja = generate_refArray_guard(obj_klass, (RegionNode*)nullptr);
5906 if (is_obja != nullptr) {
5907 PreserveJVMState pjvms2(this);
5908 set_control(is_obja);
5909 // Generate a direct call to the right arraycopy function(s).
5910 // Clones are always tightly coupled.
5911 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, array_obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5912 ac->set_clone_oop_array();
5913 Node* n = _gvn.transform(ac);
5914 assert(n == ac, "cannot disappear");
5915 ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5916
5917 result_reg->init_req(_objArray_path, control());
5918 result_val->init_req(_objArray_path, alloc_obj);
5919 result_i_o ->set_req(_objArray_path, i_o());
5920 result_mem ->set_req(_objArray_path, reset_memory());
5921 }
5922 }
5923 // Otherwise, there are no barriers to worry about.
5924 // (We can dispense with card marks if we know the allocation
5925 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
5926 // causes the non-eden paths to take compensating steps to
5927 // simulate a fresh allocation, so that no further
5928 // card marks are required in compiled code to initialize
5929 // the object.)
5930
5931 if (!stopped()) {
5932 copy_to_clone(obj, alloc_obj, array_size, true);
5933
5934 // Present the results of the copy.
5935 result_reg->init_req(_array_path, control());
5936 result_val->init_req(_array_path, alloc_obj);
5937 result_i_o ->set_req(_array_path, i_o());
5938 result_mem ->set_req(_array_path, reset_memory());
5939 }
5940 }
5941 }
5942
5943 if (!stopped()) {
5944 // It's an instance (we did array above). Make the slow-path tests.
5945 // If this is a virtual call, we generate a funny guard. We grab
5946 // the vtable entry corresponding to clone() from the target object.
5947 // If the target method which we are calling happens to be the
5948 // Object clone() method, we pass the guard. We do not need this
5949       // guard for non-virtual calls; the callee is known to be the native
5950 // Object clone().
5951 if (is_virtual) {
5952 generate_virtual_guard(obj_klass, slow_region);
5953 }
5954
5955 // The object must be easily cloneable and must not have a finalizer.
5956 // Both of these conditions may be checked in a single test.
5957 // We could optimize the test further, but we don't care.
5958 generate_misc_flags_guard(obj_klass,
5959 // Test both conditions:
5960 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
5961 // Must be cloneable but not finalizer:
5962 KlassFlags::_misc_is_cloneable_fast,
5963 slow_region);
5964 }
5965
5966 if (!stopped()) {
5967 // It's an instance, and it passed the slow-path tests.
5968 PreserveJVMState pjvms(this);
5969 Node* obj_size = nullptr; // Total object size, including object alignment padding.
5970 // Need to deoptimize on exception from allocation since Object.clone intrinsic
5971 // is reexecuted if deoptimization occurs and there could be problems when merging
5972 // exception state between multiple Object.clone versions (reexecute=true vs reexecute=false).
5973 Node* alloc_obj = new_instance(obj_klass, nullptr, &obj_size, /*deoptimize_on_exception=*/true);
5974
5975 copy_to_clone(obj, alloc_obj, obj_size, false);
5976
5977 // Present the results of the slow call.
5978 result_reg->init_req(_instance_path, control());
5979 result_val->init_req(_instance_path, alloc_obj);
5980 result_i_o ->set_req(_instance_path, i_o());
5981 result_mem ->set_req(_instance_path, reset_memory());
5982 }
5983
5984 // Generate code for the slow case. We make a call to clone().
5985 set_control(_gvn.transform(slow_region));
5986 if (!stopped()) {
5987 PreserveJVMState pjvms(this);
5988 CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual, false, true);
5989 // We need to deoptimize on exception (see comment above)
5990 Node* slow_result = set_results_for_java_call(slow_call, false, /* deoptimize */ true);
5991 // this->control() comes from set_results_for_java_call
5992 result_reg->init_req(_slow_path, control());
5993 result_val->init_req(_slow_path, slow_result);
5994 result_i_o ->set_req(_slow_path, i_o());
5995 result_mem ->set_req(_slow_path, reset_memory());
5996 }
5997
5998 // Return the combined state.
5999 set_control( _gvn.transform(result_reg));
6000 set_i_o( _gvn.transform(result_i_o));
6001 set_all_memory( _gvn.transform(result_mem));
6002 } // original reexecute is set back here
6003
6004 set_result(_gvn.transform(result_val));
6005 return true;
6006 }
6007
6008 // If we have a tightly coupled allocation, the arraycopy may take care
6009 // of the array initialization. If one of the guards we insert between
6010 // the allocation and the arraycopy causes a deoptimization, an
6011 // uninitialized array will escape the compiled method. To prevent that
6012 // we set the JVM state for uncommon traps between the allocation and
6013 // the arraycopy to the state before the allocation so, in case of
6014 // deoptimization, we'll reexecute the allocation and the
6015 // initialization.
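// Example of the hazard: for "int[] a = new int[n]; System.arraycopy(src, 0, a, 0, n);"
// with a tightly coupled allocation, a deoptimization at a guard between the two must
// re-execute the allocation in the interpreter instead of resuming with an
// uninitialized 'a' already in the heap.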
6016 JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp) {
6017 if (alloc != nullptr) {
6018 ciMethod* trap_method = alloc->jvms()->method();
6019 int trap_bci = alloc->jvms()->bci();
6020
6021 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
6022 !C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_null_check)) {
6023       // Make sure there's no store between the allocation and the
6024       // arraycopy; otherwise, visible side effects could be re-executed
6025       // in case of deoptimization and cause incorrect execution.
6026 bool no_interfering_store = true;
6027 Node* mem = alloc->in(TypeFunc::Memory);
6028 if (mem->is_MergeMem()) {
6029 for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
6030 Node* n = mms.memory();
6031 if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
6032 assert(n->is_Store(), "what else?");
6033 no_interfering_store = false;
6034 break;
6035 }
6036 }
6037 } else {
6038 for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
6039 Node* n = mms.memory();
6040 if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
6041 assert(n->is_Store(), "what else?");
6042 no_interfering_store = false;
6043 break;
6044 }
6045 }
6046 }
6047
6048 if (no_interfering_store) {
6049 SafePointNode* sfpt = create_safepoint_with_state_before_array_allocation(alloc);
6050
6051 JVMState* saved_jvms = jvms();
6052 saved_reexecute_sp = _reexecute_sp;
6053
6054 set_jvms(sfpt->jvms());
6055 _reexecute_sp = jvms()->sp();
6056
6057 return saved_jvms;
6058 }
6059 }
6060 }
6061 return nullptr;
6062 }
6063
6064 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
6065 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
6066 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
6067 JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
6068 uint size = alloc->req();
6069 SafePointNode* sfpt = new SafePointNode(size, old_jvms);
6070 old_jvms->set_map(sfpt);
6071 for (uint i = 0; i < size; i++) {
6072 sfpt->init_req(i, alloc->in(i));
6073 }
6074 int adjustment = 1;
6075 const TypeAryKlassPtr* ary_klass_ptr = alloc->in(AllocateNode::KlassNode)->bottom_type()->is_aryklassptr();
6076 if (ary_klass_ptr->is_null_free()) {
6077 // A null-free, tightly coupled array allocation can only come from LibraryCallKit::inline_newArray which
6078 // also requires the componentType and initVal on stack for re-execution.
6079 // Re-create and push the componentType.
6080 ciArrayKlass* klass = ary_klass_ptr->exact_klass()->as_array_klass();
6081 ciInstance* instance = klass->component_mirror_instance();
6082 const TypeInstPtr* t_instance = TypeInstPtr::make(instance);
6083 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), makecon(t_instance));
6084 adjustment++;
6085 }
6086 // re-push array length for deoptimization
6087 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment - 1, alloc->in(AllocateNode::ALength));
6088 if (ary_klass_ptr->is_null_free()) {
6089 // Re-create and push the initVal.
6090 Node* init_val = alloc->in(AllocateNode::InitValue);
6091 if (init_val == nullptr) {
6092 init_val = InlineTypeNode::make_all_zero(_gvn, ary_klass_ptr->elem()->is_instklassptr()->instance_klass()->as_inline_klass());
6093 } else if (UseCompressedOops) {
6094 init_val = _gvn.transform(new DecodeNNode(init_val, init_val->bottom_type()->make_ptr()));
6095 }
6096 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment, init_val);
6097 adjustment++;
6098 }
6099 old_jvms->set_sp(old_jvms->sp() + adjustment);
6100 old_jvms->set_monoff(old_jvms->monoff() + adjustment);
6101 old_jvms->set_scloff(old_jvms->scloff() + adjustment);
6102 old_jvms->set_endoff(old_jvms->endoff() + adjustment);
6103 old_jvms->set_should_reexecute(true);
6104
6105 sfpt->set_i_o(map()->i_o());
6106 sfpt->set_memory(map()->memory());
6107 sfpt->set_control(map()->control());
6108 return sfpt;
6109 }
6110
6111 // In case of a deoptimization, we restart execution at the
6112 // allocation, allocating a new array. We would leave an uninitialized
6113 // array in the heap that GCs wouldn't expect. Move the allocation
6114 // after the traps so we don't allocate the array if we
6115 // deoptimize. This is possible because tightly_coupled_allocation()
6116 // guarantees there's no observer of the allocated array at this point
6117 // and the control flow is simple enough.
6118 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
6119 int saved_reexecute_sp, uint new_idx) {
6120 if (saved_jvms_before_guards != nullptr && !stopped()) {
6121 replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
6122
6123 assert(alloc != nullptr, "only with a tightly coupled allocation");
6124 // restore JVM state to the state at the arraycopy
6125 saved_jvms_before_guards->map()->set_control(map()->control());
6126 assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
6127 assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
6128 // If we've improved the types of some nodes (null check) while
6129 // emitting the guards, propagate them to the current state
6130 map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
6131 set_jvms(saved_jvms_before_guards);
6132 _reexecute_sp = saved_reexecute_sp;
6133
6134 // Remove the allocation from above the guards
6135 CallProjections* callprojs = alloc->extract_projections(true);
6136 InitializeNode* init = alloc->initialization();
6137 Node* alloc_mem = alloc->in(TypeFunc::Memory);
6138 C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
6139 init->replace_mem_projs_by(alloc_mem, C);
6140
6141 // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
6142 // the allocation (i.e. is only valid if the allocation succeeds):
6143 // 1) replace CastIINode with AllocateArrayNode's length here
6144 // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
6145 //
6146     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
6147     // a new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy)
6148 Node* init_control = init->proj_out(TypeFunc::Control);
6149 Node* alloc_length = alloc->Ideal_length();
6150 #ifdef ASSERT
6151 Node* prev_cast = nullptr;
6152 #endif
6153 for (uint i = 0; i < init_control->outcnt(); i++) {
6154 Node* init_out = init_control->raw_out(i);
6155 if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
6156 #ifdef ASSERT
6157 if (prev_cast == nullptr) {
6158 prev_cast = init_out;
6159 } else {
6160 if (prev_cast->cmp(*init_out) == false) {
6161 prev_cast->dump();
6162 init_out->dump();
6163 assert(false, "not equal CastIINode");
6164 }
6165 }
6166 #endif
6167 C->gvn_replace_by(init_out, alloc_length);
6168 }
6169 }
6170 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
6171
6172 // move the allocation here (after the guards)
6173 _gvn.hash_delete(alloc);
6174 alloc->set_req(TypeFunc::Control, control());
6175 alloc->set_req(TypeFunc::I_O, i_o());
6176 Node *mem = reset_memory();
6177 set_all_memory(mem);
6178 alloc->set_req(TypeFunc::Memory, mem);
6179 set_control(init->proj_out_or_null(TypeFunc::Control));
6180 set_i_o(callprojs->fallthrough_ioproj);
6181
6182 // Update memory as done in GraphKit::set_output_for_allocation()
6183 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
6184 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
6185 if (ary_type->isa_aryptr() && length_type != nullptr) {
6186 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
6187 }
6188 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
6189 int elemidx = C->get_alias_index(telemref);
6190 // Need to properly move every memory projection for the Initialize
6191 #ifdef ASSERT
6192 int mark_idx = C->get_alias_index(ary_type->add_offset(oopDesc::mark_offset_in_bytes()));
6193 int klass_idx = C->get_alias_index(ary_type->add_offset(oopDesc::klass_offset_in_bytes()));
6194 #endif
6195 auto move_proj = [&](ProjNode* proj) {
6196 int alias_idx = C->get_alias_index(proj->adr_type());
6197 assert(alias_idx == Compile::AliasIdxRaw ||
6198 alias_idx == elemidx ||
6199 alias_idx == mark_idx ||
6200 alias_idx == klass_idx, "should be raw memory or array element type");
6201 set_memory(proj, alias_idx);
6202 };
6203 init->for_each_proj(move_proj, TypeFunc::Memory);
6204
6205 Node* allocx = _gvn.transform(alloc);
6206 assert(allocx == alloc, "where has the allocation gone?");
6207 assert(dest->is_CheckCastPP(), "not an allocation result?");
6208
6209 _gvn.hash_delete(dest);
6210 dest->set_req(0, control());
6211 Node* destx = _gvn.transform(dest);
6212 assert(destx == dest, "where has the allocation result gone?");
6213
6214 array_ideal_length(alloc, ary_type, true);
6215 }
6216 }
6217
6218 // Unrelated UCTs between the array allocation and the array copy, which are considered safe by tightly_coupled_allocation(),
6219 // need to be replaced by a UCT with a state before the array allocation (including the array length). This is necessary
6220 // because we could hit one of these UCTs (which are executed before the emitted array copy guards and the actual array
6221 // allocation which is moved down in arraycopy_move_allocation_here()). When later resuming execution in the interpreter,
6222 // we would have wrongly skipped the array allocation. To prevent this, we resume execution at the array allocation in
6223 // the interpreter similar to what we are doing for the newly emitted guards for the array copy.
6224 void LibraryCallKit::replace_unrelated_uncommon_traps_with_alloc_state(AllocateArrayNode* alloc,
6225 JVMState* saved_jvms_before_guards) {
6226 if (saved_jvms_before_guards->map()->control()->is_IfProj()) {
6227 // There is at least one unrelated uncommon trap which needs to be replaced.
6228 SafePointNode* sfpt = create_safepoint_with_state_before_array_allocation(alloc);
6229
6230 JVMState* saved_jvms = jvms();
6231 const int saved_reexecute_sp = _reexecute_sp;
6232 set_jvms(sfpt->jvms());
6233 _reexecute_sp = jvms()->sp();
6234
6235 replace_unrelated_uncommon_traps_with_alloc_state(saved_jvms_before_guards);
6236
6237 // Restore state
6238 set_jvms(saved_jvms);
6239 _reexecute_sp = saved_reexecute_sp;
6240 }
6241 }
6242
6243 // Replace the unrelated uncommon traps with new uncommon trap nodes by reusing the action and reason. The new uncommon
6244 // traps will have the state of the array allocation. Let the old uncommon trap nodes die.
6245 void LibraryCallKit::replace_unrelated_uncommon_traps_with_alloc_state(JVMState* saved_jvms_before_guards) {
6246 Node* if_proj = saved_jvms_before_guards->map()->control(); // Start the search right before the newly emitted guards
6247 while (if_proj->is_IfProj()) {
6248 CallStaticJavaNode* uncommon_trap = get_uncommon_trap_from_success_proj(if_proj);
6249 if (uncommon_trap != nullptr) {
6250 create_new_uncommon_trap(uncommon_trap);
6251 }
6252 assert(if_proj->in(0)->is_If(), "must be If");
6253 if_proj = if_proj->in(0)->in(0);
6254 }
6255 assert(if_proj->is_Proj() && if_proj->in(0)->is_Initialize(),
6256 "must have reached control projection of init node");
6257 }
6258
6259 void LibraryCallKit::create_new_uncommon_trap(CallStaticJavaNode* uncommon_trap_call) {
6260 const int trap_request = uncommon_trap_call->uncommon_trap_request();
6261 assert(trap_request != 0, "no valid UCT trap request");
6262 PreserveJVMState pjvms(this);
6263 set_control(uncommon_trap_call->in(0));
6264 uncommon_trap(Deoptimization::trap_request_reason(trap_request),
6265 Deoptimization::trap_request_action(trap_request));
6266 assert(stopped(), "Should be stopped");
6267 _gvn.hash_delete(uncommon_trap_call);
6268 uncommon_trap_call->set_req(0, top()); // not used anymore, kill it
6269 }
6270
6271 // Common checks for the arguments of the array sorting intrinsics.
6272 // Returns `true` if checks passed.
6273 bool LibraryCallKit::check_array_sort_arguments(Node* elementType, Node* obj, BasicType& bt) {
6274 // check address of the class
6275 if (elementType == nullptr || elementType->is_top()) {
6276 return false; // dead path
6277 }
6278 const TypeInstPtr* elem_klass = gvn().type(elementType)->isa_instptr();
6279 if (elem_klass == nullptr) {
6280 return false; // dead path
6281 }
6282 // java_mirror_type() returns non-null for compile-time Class constants only
6283 ciType* elem_type = elem_klass->java_mirror_type();
6284 if (elem_type == nullptr) {
6285 return false;
6286 }
6287 bt = elem_type->basic_type();
6288 // Disable the intrinsic if the CPU does not support SIMD sort
6289 if (!Matcher::supports_simd_sort(bt)) {
6290 return false;
6291 }
6292 // check address of the array
6293 if (obj == nullptr || obj->is_top()) {
6294 return false; // dead path
6295 }
6296 const TypeAryPtr* obj_t = _gvn.type(obj)->isa_aryptr();
6297 if (obj_t == nullptr || obj_t->elem() == Type::BOTTOM) {
6298 return false; // failed input validation
6299 }
6300 return true;
6301 }
6302
6303 //------------------------------inline_array_partition-----------------------
6304 bool LibraryCallKit::inline_array_partition() {
6305 address stubAddr = StubRoutines::select_array_partition_function();
6306 if (stubAddr == nullptr) {
6307 return false; // Intrinsic's stub is not implemented on this platform
6308 }
6309 assert(callee()->signature()->size() == 9, "arrayPartition has 8 parameters (one long)");
6310
6311 // no receiver because it is a static method
6312 Node* elementType = argument(0);
6313 Node* obj = argument(1);
6314 Node* offset = argument(2); // long
6315 Node* fromIndex = argument(4);
6316 Node* toIndex = argument(5);
6317 Node* indexPivot1 = argument(6);
6318 Node* indexPivot2 = argument(7);
6319 // PartitionOperation: argument(8) is ignored
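  // Note: the stub is expected to partition obj[fromIndex..toIndex] around the
  // two pivot values found at indexPivot1/indexPivot2 (DualPivotQuicksort style)
  // and to store the resulting partition boundaries into the two-element
  // pivotIndices array allocated below.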
6320
6321 Node* pivotIndices = nullptr;
6322 BasicType bt = T_ILLEGAL;
6323
6324 if (!check_array_sort_arguments(elementType, obj, bt)) {
6325 return false;
6326 }
6327 null_check(obj);
6328 // If obj is dead, only null-path is taken.
6329 if (stopped()) {
6330 return true;
6331 }
6332 // Set the original stack and the reexecute bit for the interpreter to reexecute
6333 // the bytecode that invokes DualPivotQuicksort.partition() if deoptimization happens.
6334 { PreserveReexecuteState preexecs(this);
6335 jvms()->set_should_reexecute(true);
6336
6337 Node* obj_adr = make_unsafe_address(obj, offset);
6338
6339 // create the pivotIndices array of type int and size = 2
6340 Node* size = intcon(2);
6341 Node* klass_node = makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_INT)));
6342 pivotIndices = new_array(klass_node, size, 0); // no arguments to push
6343 AllocateArrayNode* alloc = tightly_coupled_allocation(pivotIndices);
6344 guarantee(alloc != nullptr, "created above");
6345 Node* pivotIndices_adr = basic_plus_adr(pivotIndices, arrayOopDesc::base_offset_in_bytes(T_INT));
6346
6347 // pass the basic type enum to the stub
6348 Node* elemType = intcon(bt);
6349
6350 // Call the stub
6351 const char *stubName = "array_partition_stub";
6352 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::array_partition_Type(),
6353 stubAddr, stubName, TypePtr::BOTTOM,
6354 obj_adr, elemType, fromIndex, toIndex, pivotIndices_adr,
6355 indexPivot1, indexPivot2);
6356
6357 } // original reexecute is set back here
6358
6359 if (!stopped()) {
6360 set_result(pivotIndices);
6361 }
6362
6363 return true;
6364 }
6365
6366
6367 //------------------------------inline_array_sort-----------------------
6368 bool LibraryCallKit::inline_array_sort() {
6369 address stubAddr = StubRoutines::select_arraysort_function();
6370 if (stubAddr == nullptr) {
6371 return false; // Intrinsic's stub is not implemented on this platform
6372 }
6373 assert(callee()->signature()->size() == 7, "arraySort has 6 parameters (one long)");
6374
6375 // no receiver because it is a static method
6376 Node* elementType = argument(0);
6377 Node* obj = argument(1);
6378 Node* offset = argument(2); // long
6379 Node* fromIndex = argument(4);
6380 Node* toIndex = argument(5);
6381 // SortOperation: argument(6) is ignored
6382
6383 BasicType bt = T_ILLEGAL;
6384
6385 if (!check_array_sort_arguments(elementType, obj, bt)) {
6386 return false;
6387 }
6388 null_check(obj);
6389 // If obj is dead, only null-path is taken.
6390 if (stopped()) {
6391 return true;
6392 }
6393 Node* obj_adr = make_unsafe_address(obj, offset);
6394
6395 // pass the basic type enum to the stub
6396 Node* elemType = intcon(bt);
6397
6398 // Call the stub.
6399 const char *stubName = "arraysort_stub";
6400 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::array_sort_Type(),
6401 stubAddr, stubName, TypePtr::BOTTOM,
6402 obj_adr, elemType, fromIndex, toIndex);
6403
6404 return true;
6405 }
6406
6407
6408 //------------------------------inline_arraycopy-----------------------
6409 // public static native void java.lang.System.arraycopy(Object src, int srcPos,
6410 // Object dest, int destPos,
6411 // int length);
6412 bool LibraryCallKit::inline_arraycopy() {
6413 // Get the arguments.
6414 Node* src = argument(0); // type: oop
6415 Node* src_offset = argument(1); // type: int
6416 Node* dest = argument(2); // type: oop
6417 Node* dest_offset = argument(3); // type: int
6418 Node* length = argument(4); // type: int
6419
6420 uint new_idx = C->unique();
6421
6422 // Check for allocation before we add nodes that would confuse
6423 // tightly_coupled_allocation()
6424 AllocateArrayNode* alloc = tightly_coupled_allocation(dest);
6425
6426 int saved_reexecute_sp = -1;
6427 JVMState* saved_jvms_before_guards = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
6428 // See arraycopy_restore_alloc_state() comment
6429 // if alloc == null we don't have to worry about a tightly coupled allocation so we can emit all needed guards
6430 // if saved_jvms_before_guards is not null (then alloc is not null) then we can handle guards and a tightly coupled allocation
6431 // if saved_jvms_before_guards is null and alloc is not null, we can't emit any guards
6432 bool can_emit_guards = (alloc == nullptr || saved_jvms_before_guards != nullptr);
6433
6434 // The following tests must be performed
6435 // (1) src and dest are arrays.
6436 // (2) src and dest arrays must have elements of the same BasicType
6437 // (3) src and dest must not be null.
6438 // (4) src_offset must not be negative.
6439 // (5) dest_offset must not be negative.
6440 // (6) length must not be negative.
6441 // (7) src_offset + length must not exceed length of src.
6442 // (8) dest_offset + length must not exceed length of dest.
6443 // (9) each element of an oop array must be assignable
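  // As an illustrative (hypothetical) example of why test (9) is needed:
  //   Object[] dst = new String[1];
  //   System.arraycopy(new Object[] { new Object() }, 0, dst, 0, 1);
  // must throw ArrayStoreException rather than store a non-String into dst.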
6444
6445 // (3) src and dest must not be null.
6446 // always do this here because we need the JVM state for uncommon traps
6447 Node* null_ctl = top();
6448 src = saved_jvms_before_guards != nullptr ? null_check_oop(src, &null_ctl, true, true) : null_check(src, T_ARRAY);
6449 assert(null_ctl->is_top(), "no null control here");
6450 dest = null_check(dest, T_ARRAY);
6451
6452 if (!can_emit_guards) {
6453 // if saved_jvms_before_guards is null and alloc is not null, we don't emit any
6454 // guards but the arraycopy node could still take advantage of a
6455     // tightly coupled allocation. tightly_coupled_allocation() is
6456 // called again to make sure it takes the null check above into
6457 // account: the null check is mandatory and if it caused an
6458 // uncommon trap to be emitted then the allocation can't be
6459 // considered tightly coupled in this context.
6460 alloc = tightly_coupled_allocation(dest);
6461 }
6462
6463 bool validated = false;
6464
6465 const Type* src_type = _gvn.type(src);
6466 const Type* dest_type = _gvn.type(dest);
6467 const TypeAryPtr* top_src = src_type->isa_aryptr();
6468 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
6469
6470 // Do we have the type of src?
6471 bool has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
6472 // Do we have the type of dest?
6473 bool has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
6474 // Is the type for src from speculation?
6475 bool src_spec = false;
6476 // Is the type for dest from speculation?
6477 bool dest_spec = false;
6478
6479 if ((!has_src || !has_dest) && can_emit_guards) {
6480     // We don't have sufficient type information; let's see if
6481     // speculative types can help. We need types for both src and
6482     // dest for the speculation to pay off.
6483
6484 // Do we already have or could we have type information for src
6485 bool could_have_src = has_src;
6486 // Do we already have or could we have type information for dest
6487 bool could_have_dest = has_dest;
6488
6489 ciKlass* src_k = nullptr;
6490 if (!has_src) {
6491 src_k = src_type->speculative_type_not_null();
6492 if (src_k != nullptr && src_k->is_array_klass()) {
6493 could_have_src = true;
6494 }
6495 }
6496
6497 ciKlass* dest_k = nullptr;
6498 if (!has_dest) {
6499 dest_k = dest_type->speculative_type_not_null();
6500 if (dest_k != nullptr && dest_k->is_array_klass()) {
6501 could_have_dest = true;
6502 }
6503 }
6504
6505 if (could_have_src && could_have_dest) {
6506 // This is going to pay off so emit the required guards
6507 if (!has_src) {
6508 src = maybe_cast_profiled_obj(src, src_k, true);
6509 src_type = _gvn.type(src);
6510 top_src = src_type->isa_aryptr();
6511 has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
6512 src_spec = true;
6513 }
6514 if (!has_dest) {
6515 dest = maybe_cast_profiled_obj(dest, dest_k, true);
6516 dest_type = _gvn.type(dest);
6517 top_dest = dest_type->isa_aryptr();
6518 has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
6519 dest_spec = true;
6520 }
6521 }
6522 }
6523
6524 if (has_src && has_dest && can_emit_guards) {
6525     BasicType src_elem = top_src->elem()->array_element_basic_type();
6526     BasicType dest_elem = top_dest->elem()->array_element_basic_type();
6527 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
6528 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
6529
6530 if (src_elem == dest_elem && top_src->is_flat() == top_dest->is_flat() && src_elem == T_OBJECT) {
6531       // If both arrays are object arrays, then having the exact types
6532       // for both will remove the need for a subtype check at runtime
6533       // before the call and may make it possible to pick a faster copy
6534       // routine (without a subtype check on every element).
6535 // Do we have the exact type of src?
6536 bool could_have_src = src_spec;
6537 // Do we have the exact type of dest?
6538 bool could_have_dest = dest_spec;
6539 ciKlass* src_k = nullptr;
6540 ciKlass* dest_k = nullptr;
6541 if (!src_spec) {
6542 src_k = src_type->speculative_type_not_null();
6543 if (src_k != nullptr && src_k->is_array_klass()) {
6544 could_have_src = true;
6545 }
6546 }
6547 if (!dest_spec) {
6548 dest_k = dest_type->speculative_type_not_null();
6549 if (dest_k != nullptr && dest_k->is_array_klass()) {
6550 could_have_dest = true;
6551 }
6552 }
6553 if (could_have_src && could_have_dest) {
6554 // If we can have both exact types, emit the missing guards
6555 if (could_have_src && !src_spec) {
6556 src = maybe_cast_profiled_obj(src, src_k, true);
6557 src_type = _gvn.type(src);
6558 top_src = src_type->isa_aryptr();
6559 }
6560 if (could_have_dest && !dest_spec) {
6561 dest = maybe_cast_profiled_obj(dest, dest_k, true);
6562 dest_type = _gvn.type(dest);
6563 top_dest = dest_type->isa_aryptr();
6564 }
6565 }
6566 }
6567 }
6568
6569 ciMethod* trap_method = method();
6570 int trap_bci = bci();
6571 if (saved_jvms_before_guards != nullptr) {
6572 trap_method = alloc->jvms()->method();
6573 trap_bci = alloc->jvms()->bci();
6574 }
6575
6576 bool negative_length_guard_generated = false;
6577
6578 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
6579 can_emit_guards && !src->is_top() && !dest->is_top()) {
6580     // validate arguments: enables transformation of the ArrayCopyNode
6581 validated = true;
6582
6583 RegionNode* slow_region = new RegionNode(1);
6584 record_for_igvn(slow_region);
6585
6586 // (1) src and dest are arrays.
6587 generate_non_array_guard(load_object_klass(src), slow_region, &src);
6588 generate_non_array_guard(load_object_klass(dest), slow_region, &dest);
6589
6590 // (2) src and dest arrays must have elements of the same BasicType
6591 // done at macro expansion or at Ideal transformation time
6592
6593 // (4) src_offset must not be negative.
6594 generate_negative_guard(src_offset, slow_region);
6595
6596 // (5) dest_offset must not be negative.
6597 generate_negative_guard(dest_offset, slow_region);
6598
6599 // (7) src_offset + length must not exceed length of src.
6600 generate_limit_guard(src_offset, length,
6601 load_array_length(src),
6602 slow_region);
6603
6604 // (8) dest_offset + length must not exceed length of dest.
6605 generate_limit_guard(dest_offset, length,
6606 load_array_length(dest),
6607 slow_region);
6608
6609 // (6) length must not be negative.
6610     // This is also checked in generate_arraycopy() during macro expansion, but
6611     // we have to check it here as well for the case where the ArrayCopyNode
6612     // will be eliminated by Escape Analysis.
6613 if (EliminateAllocations) {
6614 generate_negative_guard(length, slow_region);
6615 negative_length_guard_generated = true;
6616 }
6617
6618 // (9) each element of an oop array must be assignable
6619 Node* dest_klass = load_object_klass(dest);
6620 Node* refined_dest_klass = dest_klass;
6621 if (src != dest) {
6622 dest_klass = load_non_refined_array_klass(refined_dest_klass);
6623 Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
6624 slow_region->add_req(not_subtype_ctrl);
6625 }
6626
6627 // TODO 8251971 Improve this. What about atomicity? Make sure this is always folded for type arrays.
6628 // If destination is null-restricted, source must be null-restricted as well: src_null_restricted || !dst_null_restricted
6629 Node* src_klass = load_object_klass(src);
6630 Node* adr_prop_src = basic_plus_adr(top(), src_klass, in_bytes(ArrayKlass::properties_offset()));
6631 Node* prop_src = _gvn.transform(LoadNode::make(_gvn, control(), immutable_memory(), adr_prop_src,
6632 _gvn.type(adr_prop_src)->is_ptr(), TypeInt::INT, T_INT,
6633 MemNode::unordered));
6634 Node* adr_prop_dest = basic_plus_adr(top(), refined_dest_klass, in_bytes(ArrayKlass::properties_offset()));
6635 Node* prop_dest = _gvn.transform(LoadNode::make(_gvn, control(), immutable_memory(), adr_prop_dest,
6636 _gvn.type(adr_prop_dest)->is_ptr(), TypeInt::INT, T_INT,
6637 MemNode::unordered));
6638
6639 const ArrayProperties props_null_restricted = ArrayProperties::Default().with_null_restricted();
6640 jint props_value = (jint)props_null_restricted.value();
6641
6642 prop_dest = _gvn.transform(new XorINode(prop_dest, intcon(props_value)));
6643 prop_src = _gvn.transform(new OrINode(prop_dest, prop_src));
6644 prop_src = _gvn.transform(new AndINode(prop_src, intcon(props_value)));
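    // For each property bit set in props_value, the expression above yields
    //   ((prop_dest ^ props_value) | prop_src) & props_value
    // which has that bit set iff the source has the property or the destination
    // lacks it. It therefore equals props_value exactly when
    // src_null_restricted || !dest_null_restricted holds, and the guard below
    // sends every other combination to the slow path.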
6645
6646 Node* chk = _gvn.transform(new CmpINode(prop_src, intcon(props_value)));
6647 Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::ne));
6648 generate_fair_guard(tst, slow_region);
6649
6650 // TODO 8251971 This is too strong
6651 generate_fair_guard(flat_array_test(src), slow_region);
6652 generate_fair_guard(flat_array_test(dest), slow_region);
6653
6654 {
6655 PreserveJVMState pjvms(this);
6656 set_control(_gvn.transform(slow_region));
6657 uncommon_trap(Deoptimization::Reason_intrinsic,
6658 Deoptimization::Action_make_not_entrant);
6659 assert(stopped(), "Should be stopped");
6660 }
6661
6662 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->isa_klassptr();
6663 if (dest_klass_t == nullptr) {
6664 // refined_dest_klass may not be an array, which leads to dest_klass being top. This means we
6665 // are in a dead path.
6666 uncommon_trap(Deoptimization::Reason_intrinsic,
6667 Deoptimization::Action_make_not_entrant);
6668 return true;
6669 }
6670
6671 const Type* toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
6672 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
6673 arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
6674 }
6675
6676 if (stopped()) {
6677 return true;
6678 }
6679
6680 Node* dest_klass = load_object_klass(dest);
6681 dest_klass = load_non_refined_array_klass(dest_klass);
6682
6683 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6684 // Create LoadRange and LoadKlass nodes for use during macro expansion here
6685 // so the compiler has a chance to eliminate them: during macro expansion,
6686 // we have to set their control (CastPP nodes are eliminated).
6687 load_object_klass(src), dest_klass,
6688 load_array_length(src), load_array_length(dest));
6689
6690 ac->set_arraycopy(validated);
6691
6692 Node* n = _gvn.transform(ac);
6693 if (n == ac) {
6694 ac->connect_outputs(this);
6695 } else {
6696     assert(validated, "shouldn't transform if all arguments are not validated");
6697 set_all_memory(n);
6698 }
6699 clear_upper_avx();
6700
6702 return true;
6703 }
6704
6705
6706 // Helper function which determines if an arraycopy immediately follows
6707 // an allocation, with no intervening tests or other escapes for the object.
6708 AllocateArrayNode*
6709 LibraryCallKit::tightly_coupled_allocation(Node* ptr) {
6710 if (stopped()) return nullptr; // no fast path
6711 if (!C->do_aliasing()) return nullptr; // no MergeMems around
6712
6713 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr);
6714 if (alloc == nullptr) return nullptr;
6715
6716 Node* rawmem = memory(Compile::AliasIdxRaw);
6717 // Is the allocation's memory state untouched?
6718 if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
6719 // Bail out if there have been raw-memory effects since the allocation.
6720 // (Example: There might have been a call or safepoint.)
6721 return nullptr;
6722 }
6723 rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
6724 if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
6725 return nullptr;
6726 }
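  // At this point the raw memory chain has the expected shape:
  //   alloc --> Proj(raw mem) --> Initialize --> Proj(raw mem) == memory(AliasIdxRaw)
  // i.e. nothing has touched raw memory since the allocation was initialized.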
6727
6728 // There must be no unexpected observers of this allocation.
6729 for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
6730 Node* obs = ptr->fast_out(i);
6731 if (obs != this->map()) {
6732 return nullptr;
6733 }
6734 }
6735
6736 // This arraycopy must unconditionally follow the allocation of the ptr.
6737 Node* alloc_ctl = ptr->in(0);
6738 Node* ctl = control();
6739 while (ctl != alloc_ctl) {
6740 // There may be guards which feed into the slow_region.
6741 // Any other control flow means that we might not get a chance
6742 // to finish initializing the allocated object.
6743 // Various low-level checks bottom out in uncommon traps. These
6744 // are considered safe since we've already checked above that
6745 // there is no unexpected observer of this allocation.
6746 if (get_uncommon_trap_from_success_proj(ctl) != nullptr) {
6747 assert(ctl->in(0)->is_If(), "must be If");
6748 ctl = ctl->in(0)->in(0);
6749 } else {
6750 return nullptr;
6751 }
6752 }
6753
6754 // If we get this far, we have an allocation which immediately
6755 // precedes the arraycopy, and we can take over zeroing the new object.
6756 // The arraycopy will finish the initialization, and provide
6757 // a new control state to which we will anchor the destination pointer.
6758
6759 return alloc;
6760 }
6761
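// Given a projection of a guard's If node, inspect the sibling projection and
// return the uncommon trap call it feeds, or null if 'node' is not an If
// projection or the other branch does not end in an uncommon trap.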
6762 CallStaticJavaNode* LibraryCallKit::get_uncommon_trap_from_success_proj(Node* node) {
6763 if (node->is_IfProj()) {
6764 IfProjNode* other_proj = node->as_IfProj()->other_if_proj();
6765 for (DUIterator_Fast jmax, j = other_proj->fast_outs(jmax); j < jmax; j++) {
6766 Node* obs = other_proj->fast_out(j);
6767 if (obs->in(0) == other_proj && obs->is_CallStaticJava() &&
6768 (obs->as_CallStaticJava()->entry_point() == OptoRuntime::uncommon_trap_blob()->entry_point())) {
6769 return obs->as_CallStaticJava();
6770 }
6771 }
6772 }
6773 return nullptr;
6774 }
6775
6776 //-------------inline_encodeISOArray-----------------------------------
6777 // int sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
6778 // int java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
6779 // int java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
6780 // encode a char[] or byte[] source to byte[] in ISO_8859_1 or ASCII
6781 bool LibraryCallKit::inline_encodeISOArray(bool ascii) {
6782 assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
6783   // no receiver since it is a static method
6784 Node *src = argument(0);
6785 Node *src_offset = argument(1);
6786 Node *dst = argument(2);
6787 Node *dst_offset = argument(3);
6788 Node *length = argument(4);
6789
6790 // Cast source & target arrays to not-null
6791 src = must_be_not_null(src, true);
6792 dst = must_be_not_null(dst, true);
6793 if (stopped()) {
6794 return true;
6795 }
6796
6797 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
6798 const TypeAryPtr* dst_type = dst->Value(&_gvn)->isa_aryptr();
6799 if (src_type == nullptr || src_type->elem() == Type::BOTTOM ||
6800 dst_type == nullptr || dst_type->elem() == Type::BOTTOM) {
6801 // failed array check
6802 return false;
6803 }
6804
6805 // Figure out the size and type of the elements we will be copying.
6806 BasicType src_elem = src_type->elem()->array_element_basic_type();
6807 BasicType dst_elem = dst_type->elem()->array_element_basic_type();
6808   if (!((src_elem == T_CHAR) || (src_elem == T_BYTE)) || dst_elem != T_BYTE) {
6809 return false;
6810 }
6811
6812 // Check source & target bounds
6813 RegionNode* bailout = create_bailout();
6814 generate_string_range_check(src, src_offset, length, src_elem == T_BYTE, bailout);
6815 generate_string_range_check(dst, dst_offset, length, false, bailout);
6816 if (check_bailout(bailout)) {
6817 return true;
6818 }
6819
6820 Node* src_start = array_element_address(src, src_offset, T_CHAR);
6821 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
6822 // 'src_start' points to src array + scaled offset
6823 // 'dst_start' points to dst array + scaled offset
6824
6825 const TypeAryPtr* mtype = TypeAryPtr::BYTES;
6826 Node* enc = new EncodeISOArrayNode(control(), memory(mtype), src_start, dst_start, length, ascii);
6827 enc = _gvn.transform(enc);
6828 Node* res_mem = _gvn.transform(new SCMemProjNode(enc));
6829 set_memory(res_mem, mtype);
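  // The node's integer result is the number of characters actually encoded;
  // the encoder stops at the first character that does not fit into the target
  // charset and leaves the remainder to the Java caller.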
6830 set_result(enc);
6831 clear_upper_avx();
6832
6833 return true;
6834 }
6835
6836 //-------------inline_multiplyToLen-----------------------------------
6837 bool LibraryCallKit::inline_multiplyToLen() {
6838 assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");
6839
6840 address stubAddr = StubRoutines::multiplyToLen();
6841 if (stubAddr == nullptr) {
6842 return false; // Intrinsic's stub is not implemented on this platform
6843 }
6844 const char* stubName = "multiplyToLen";
6845
6846 assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");
6847
6848 // no receiver because it is a static method
6849 Node* x = argument(0);
6850 Node* xlen = argument(1);
6851 Node* y = argument(2);
6852 Node* ylen = argument(3);
6853 Node* z = argument(4);
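  // The stub computes z = x * y on big-endian int[] magnitudes (as used by
  // BigInteger::implMultiplyToLen); the Java caller is responsible for sizing
  // z to hold xlen + ylen ints.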
6854
6855 x = must_be_not_null(x, true);
6856 y = must_be_not_null(y, true);
6857
6858 const TypeAryPtr* x_type = x->Value(&_gvn)->isa_aryptr();
6859 const TypeAryPtr* y_type = y->Value(&_gvn)->isa_aryptr();
6860 if (x_type == nullptr || x_type->elem() == Type::BOTTOM ||
6861 y_type == nullptr || y_type->elem() == Type::BOTTOM) {
6862 // failed array check
6863 return false;
6864 }
6865
6866 BasicType x_elem = x_type->elem()->array_element_basic_type();
6867 BasicType y_elem = y_type->elem()->array_element_basic_type();
6868 if (x_elem != T_INT || y_elem != T_INT) {
6869 return false;
6870 }
6871
6872 Node* x_start = array_element_address(x, intcon(0), x_elem);
6873 Node* y_start = array_element_address(y, intcon(0), y_elem);
6874   // 'x_start' points to the first element of the x array
6875   // 'y_start' points to the first element of the y array
6876
6877 Node* z_start = array_element_address(z, intcon(0), T_INT);
6878
6879 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
6880 OptoRuntime::multiplyToLen_Type(),
6881 stubAddr, stubName, TypePtr::BOTTOM,
6882 x_start, xlen, y_start, ylen, z_start);
6883
6884 C->set_has_split_ifs(true); // Has chance for split-if optimization
6885 set_result(z);
6886 return true;
6887 }
6888
6889 //-------------inline_squareToLen------------------------------------
6890 bool LibraryCallKit::inline_squareToLen() {
6891 assert(UseSquareToLenIntrinsic, "not implemented on this platform");
6892
6893 address stubAddr = StubRoutines::squareToLen();
6894 if (stubAddr == nullptr) {
6895 return false; // Intrinsic's stub is not implemented on this platform
6896 }
6897 const char* stubName = "squareToLen";
6898
6899 assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");
6900
6901 Node* x = argument(0);
6902 Node* len = argument(1);
6903 Node* z = argument(2);
6904 Node* zlen = argument(3);
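  // As with multiplyToLen, the stub operates on an int[] magnitude: it computes
  // z = x * x for BigInteger::implSquareToLen.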
6905
6906 x = must_be_not_null(x, true);
6907 z = must_be_not_null(z, true);
6908
6909 const TypeAryPtr* x_type = x->Value(&_gvn)->isa_aryptr();
6910 const TypeAryPtr* z_type = z->Value(&_gvn)->isa_aryptr();
6911 if (x_type == nullptr || x_type->elem() == Type::BOTTOM ||
6912 z_type == nullptr || z_type->elem() == Type::BOTTOM) {
6913 // failed array check
6914 return false;
6915 }
6916
6917 BasicType x_elem = x_type->elem()->array_element_basic_type();
6918 BasicType z_elem = z_type->elem()->array_element_basic_type();
6919 if (x_elem != T_INT || z_elem != T_INT) {
6920 return false;
6921 }
6922
6924 Node* x_start = array_element_address(x, intcon(0), x_elem);
6925 Node* z_start = array_element_address(z, intcon(0), z_elem);
6926
6927 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
6928 OptoRuntime::squareToLen_Type(),
6929 stubAddr, stubName, TypePtr::BOTTOM,
6930 x_start, len, z_start, zlen);
6931
6932 set_result(z);
6933 return true;
6934 }
6935
6936 //-------------inline_mulAdd------------------------------------------
6937 bool LibraryCallKit::inline_mulAdd() {
6938 assert(UseMulAddIntrinsic, "not implemented on this platform");
6939
6940 address stubAddr = StubRoutines::mulAdd();
6941 if (stubAddr == nullptr) {
6942 return false; // Intrinsic's stub is not implemented on this platform
6943 }
6944 const char* stubName = "mulAdd";
6945
6946 assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");
6947
6948 Node* out = argument(0);
6949 Node* in = argument(1);
6950 Node* offset = argument(2);
6951 Node* len = argument(3);
6952 Node* k = argument(4);
6953
6954 in = must_be_not_null(in, true);
6955 out = must_be_not_null(out, true);
6956
6957 const TypeAryPtr* out_type = out->Value(&_gvn)->isa_aryptr();
6958 const TypeAryPtr* in_type = in->Value(&_gvn)->isa_aryptr();
6959 if (out_type == nullptr || out_type->elem() == Type::BOTTOM ||
6960 in_type == nullptr || in_type->elem() == Type::BOTTOM) {
6961 // failed array check
6962 return false;
6963 }
6964
6965 BasicType out_elem = out_type->elem()->array_element_basic_type();
6966 BasicType in_elem = in_type->elem()->array_element_basic_type();
6967 if (out_elem != T_INT || in_elem != T_INT) {
6968 return false;
6969 }
6970
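  // The mulAdd stub indexes 'out' from its least-significant (highest) end, so
  // rebase the Java-level offset as a distance from the end of the array.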
6971 Node* outlen = load_array_length(out);
6972 Node* new_offset = _gvn.transform(new SubINode(outlen, offset));
6973 Node* out_start = array_element_address(out, intcon(0), out_elem);
6974 Node* in_start = array_element_address(in, intcon(0), in_elem);
6975
6976 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
6977 OptoRuntime::mulAdd_Type(),
6978 stubAddr, stubName, TypePtr::BOTTOM,
6979                                 out_start, in_start, new_offset, len, k);
6980 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6981 set_result(result);
6982 return true;
6983 }
6984
6985 //-------------inline_montgomeryMultiply-----------------------------------
6986 bool LibraryCallKit::inline_montgomeryMultiply() {
6987 address stubAddr = StubRoutines::montgomeryMultiply();
6988 if (stubAddr == nullptr) {
6989 return false; // Intrinsic's stub is not implemented on this platform
6990 }
6991
6992 assert(UseMontgomeryMultiplyIntrinsic, "not implemented on this platform");
6993 const char* stubName = "montgomery_multiply";
6994
6995 assert(callee()->signature()->size() == 7, "montgomeryMultiply has 7 parameters");
6996
6997 Node* a = argument(0);
6998 Node* b = argument(1);
6999 Node* n = argument(2);
7000 Node* len = argument(3);
7001 Node* inv = argument(4);
7002 Node* m = argument(6);
7003
7004 const TypeAryPtr* a_type = a->Value(&_gvn)->isa_aryptr();
7005 const TypeAryPtr* b_type = b->Value(&_gvn)->isa_aryptr();
7006 const TypeAryPtr* n_type = n->Value(&_gvn)->isa_aryptr();
7007 const TypeAryPtr* m_type = m->Value(&_gvn)->isa_aryptr();
7008 if (a_type == nullptr || a_type->elem() == Type::BOTTOM ||
7009 b_type == nullptr || b_type->elem() == Type::BOTTOM ||
7010 n_type == nullptr || n_type->elem() == Type::BOTTOM ||
7011 m_type == nullptr || m_type->elem() == Type::BOTTOM) {
7012 // failed array check
7013 return false;
7014 }
7015
7016 BasicType a_elem = a_type->elem()->array_element_basic_type();
7017 BasicType b_elem = b_type->elem()->array_element_basic_type();
7018 BasicType n_elem = n_type->elem()->array_element_basic_type();
7019 BasicType m_elem = m_type->elem()->array_element_basic_type();
7020 if (a_elem != T_INT || b_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
7021 return false;
7022 }
7023
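  // The stub computes the Montgomery product m = a * b * R^-1 mod n, where
  // R = 2^(32*len) and 'inv' is a precomputed reduction constant supplied by
  // the Java caller.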
7024 // Make the call
7025 {
7026 Node* a_start = array_element_address(a, intcon(0), a_elem);
7027 Node* b_start = array_element_address(b, intcon(0), b_elem);
7028 Node* n_start = array_element_address(n, intcon(0), n_elem);
7029 Node* m_start = array_element_address(m, intcon(0), m_elem);
7030
7031 Node* call = make_runtime_call(RC_LEAF,
7032 OptoRuntime::montgomeryMultiply_Type(),
7033 stubAddr, stubName, TypePtr::BOTTOM,
7034 a_start, b_start, n_start, len, inv, top(),
7035 m_start);
7036 set_result(m);
7037 }
7038
7039 return true;
7040 }
7041
7042 bool LibraryCallKit::inline_montgomerySquare() {
7043 address stubAddr = StubRoutines::montgomerySquare();
7044 if (stubAddr == nullptr) {
7045 return false; // Intrinsic's stub is not implemented on this platform
7046 }
7047
7048 assert(UseMontgomerySquareIntrinsic, "not implemented on this platform");
7049 const char* stubName = "montgomery_square";
7050
7051 assert(callee()->signature()->size() == 6, "montgomerySquare has 6 parameters");
7052
7053 Node* a = argument(0);
7054 Node* n = argument(1);
7055 Node* len = argument(2);
7056 Node* inv = argument(3);
7057 Node* m = argument(5);
7058
7059 const TypeAryPtr* a_type = a->Value(&_gvn)->isa_aryptr();
7060 const TypeAryPtr* n_type = n->Value(&_gvn)->isa_aryptr();
7061 const TypeAryPtr* m_type = m->Value(&_gvn)->isa_aryptr();
7062 if (a_type == nullptr || a_type->elem() == Type::BOTTOM ||
7063 n_type == nullptr || n_type->elem() == Type::BOTTOM ||
7064 m_type == nullptr || m_type->elem() == Type::BOTTOM) {
7065 // failed array check
7066 return false;
7067 }
7068
7069 BasicType a_elem = a_type->elem()->array_element_basic_type();
7070 BasicType n_elem = n_type->elem()->array_element_basic_type();
7071 BasicType m_elem = m_type->elem()->array_element_basic_type();
7072 if (a_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
7073 return false;
7074 }
7075
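  // Same contract as montgomeryMultiply above, specialized for squaring:
  // m = a * a * R^-1 mod n.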
7076 // Make the call
7077 {
7078 Node* a_start = array_element_address(a, intcon(0), a_elem);
7079 Node* n_start = array_element_address(n, intcon(0), n_elem);
7080 Node* m_start = array_element_address(m, intcon(0), m_elem);
7081
7082 Node* call = make_runtime_call(RC_LEAF,
7083 OptoRuntime::montgomerySquare_Type(),
7084 stubAddr, stubName, TypePtr::BOTTOM,
7085 a_start, n_start, len, inv, top(),
7086 m_start);
7087 set_result(m);
7088 }
7089
7090 return true;
7091 }
7092
7093 bool LibraryCallKit::inline_bigIntegerShift(bool isRightShift) {
7094 address stubAddr = nullptr;
7095 const char* stubName = nullptr;
7096
7097   stubAddr = isRightShift ? StubRoutines::bigIntegerRightShift() : StubRoutines::bigIntegerLeftShift();
7098 if (stubAddr == nullptr) {
7099 return false; // Intrinsic's stub is not implemented on this platform
7100 }
7101
7102   stubName = isRightShift ? "bigIntegerRightShiftWorker" : "bigIntegerLeftShiftWorker";
7103
7104 assert(callee()->signature()->size() == 5, "expected 5 arguments");
7105
7106 Node* newArr = argument(0);
7107 Node* oldArr = argument(1);
7108 Node* newIdx = argument(2);
7109 Node* shiftCount = argument(3);
7110 Node* numIter = argument(4);
7111
7112 const TypeAryPtr* newArr_type = newArr->Value(&_gvn)->isa_aryptr();
7113 const TypeAryPtr* oldArr_type = oldArr->Value(&_gvn)->isa_aryptr();
7114 if (newArr_type == nullptr || newArr_type->elem() == Type::BOTTOM ||
7115 oldArr_type == nullptr || oldArr_type->elem() == Type::BOTTOM) {
7116 return false;
7117 }
7118
7119 BasicType newArr_elem = newArr_type->elem()->array_element_basic_type();
7120 BasicType oldArr_elem = oldArr_type->elem()->array_element_basic_type();
7121 if (newArr_elem != T_INT || oldArr_elem != T_INT) {
7122 return false;
7123 }
7124
7125 // Make the call
7126 {
7127 Node* newArr_start = array_element_address(newArr, intcon(0), newArr_elem);
7128 Node* oldArr_start = array_element_address(oldArr, intcon(0), oldArr_elem);
7129
7130 Node* call = make_runtime_call(RC_LEAF,
7131 OptoRuntime::bigIntegerShift_Type(),
7132 stubAddr,
7133 stubName,
7134 TypePtr::BOTTOM,
7135 newArr_start,
7136 oldArr_start,
7137 newIdx,
7138 shiftCount,
7139 numIter);
7140 }
7141
7142 return true;
7143 }
7144
7145 //-------------inline_vectorizedMismatch------------------------------
7146 bool LibraryCallKit::inline_vectorizedMismatch() {
7147 assert(UseVectorizedMismatchIntrinsic, "not implemented on this platform");
7148
7149   assert(callee()->signature()->size() == 8, "vectorizedMismatch has 6 parameters (two longs)");
7150 Node* obja = argument(0); // Object
7151 Node* aoffset = argument(1); // long
7152 Node* objb = argument(3); // Object
7153 Node* boffset = argument(4); // long
7154 Node* length = argument(6); // int
7155 Node* scale = argument(7); // int
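  // 'scale' is log2 of the element size in bytes; the switch further below
  // maps it to a BasicType.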
7156
7157 const TypeAryPtr* obja_t = _gvn.type(obja)->isa_aryptr();
7158 const TypeAryPtr* objb_t = _gvn.type(objb)->isa_aryptr();
7159 if (obja_t == nullptr || obja_t->elem() == Type::BOTTOM ||
7160 objb_t == nullptr || objb_t->elem() == Type::BOTTOM ||
7161 scale == top()) {
7162 return false; // failed input validation
7163 }
7164
7165 Node* obja_adr = make_unsafe_address(obja, aoffset);
7166 Node* objb_adr = make_unsafe_address(objb, boffset);
7167
7168   // Partial inlining handling for inputs smaller than ArrayOperationPartialInlineSize bytes.
7169 //
7170 // inline_limit = ArrayOperationPartialInlineSize / element_size;
7171 // if (length <= inline_limit) {
7172 // inline_path:
7173 // vmask = VectorMaskGen length
7174 // vload1 = LoadVectorMasked obja, vmask
7175 // vload2 = LoadVectorMasked objb, vmask
7176 // result1 = VectorCmpMasked vload1, vload2, vmask
7177 // } else {
7178 // call_stub_path:
7179 // result2 = call vectorizedMismatch_stub(obja, objb, length, scale)
7180 // }
7181 // exit_block:
7182 // return Phi(result1, result2);
7183 //
7184 enum { inline_path = 1, // input is small enough to process it all at once
7185 stub_path = 2, // input is too large; call into the VM
7186 PATH_LIMIT = 3
7187 };
7188
7189 Node* exit_block = new RegionNode(PATH_LIMIT);
7190 Node* result_phi = new PhiNode(exit_block, TypeInt::INT);
7191 Node* memory_phi = new PhiNode(exit_block, Type::MEMORY, TypePtr::BOTTOM);
7192
7193 Node* call_stub_path = control();
7194
7195 BasicType elem_bt = T_ILLEGAL;
7196
7197 const TypeInt* scale_t = _gvn.type(scale)->is_int();
7198 if (scale_t->is_con()) {
7199 switch (scale_t->get_con()) {
7200 case 0: elem_bt = T_BYTE; break;
7201 case 1: elem_bt = T_SHORT; break;
7202 case 2: elem_bt = T_INT; break;
7203 case 3: elem_bt = T_LONG; break;
7204
7205 default: elem_bt = T_ILLEGAL; break; // not supported
7206 }
7207 }
7208
7209 int inline_limit = 0;
7210 bool do_partial_inline = false;
7211
7212 if (elem_bt != T_ILLEGAL && ArrayOperationPartialInlineSize > 0) {
7213 inline_limit = ArrayOperationPartialInlineSize / type2aelembytes(elem_bt);
7214 do_partial_inline = inline_limit >= 16;
7215 }
7216
7217 if (do_partial_inline) {
7218 assert(elem_bt != T_ILLEGAL, "sanity");
7219
7220 if (Matcher::match_rule_supported_vector(Op_VectorMaskGen, inline_limit, elem_bt) &&
7221 Matcher::match_rule_supported_vector(Op_LoadVectorMasked, inline_limit, elem_bt) &&
7222 Matcher::match_rule_supported_vector(Op_VectorCmpMasked, inline_limit, elem_bt)) {
7223
7224 const TypeVect* vt = TypeVect::make(elem_bt, inline_limit);
7225 Node* cmp_length = _gvn.transform(new CmpINode(length, intcon(inline_limit)));
7226 Node* bol_gt = _gvn.transform(new BoolNode(cmp_length, BoolTest::gt));
7227
7228 call_stub_path = generate_guard(bol_gt, nullptr, PROB_MIN);
7229
7230 if (!stopped()) {
7231 Node* casted_length = _gvn.transform(new CastIINode(control(), length, TypeInt::make(0, inline_limit, Type::WidenMin)));
7232
7233 const TypePtr* obja_adr_t = _gvn.type(obja_adr)->isa_ptr();
7234 const TypePtr* objb_adr_t = _gvn.type(objb_adr)->isa_ptr();
7235 Node* obja_adr_mem = memory(C->get_alias_index(obja_adr_t));
7236 Node* objb_adr_mem = memory(C->get_alias_index(objb_adr_t));
7237
7238 Node* vmask = _gvn.transform(VectorMaskGenNode::make(ConvI2X(casted_length), elem_bt));
7239 Node* vload_obja = _gvn.transform(new LoadVectorMaskedNode(control(), obja_adr_mem, obja_adr, obja_adr_t, vt, vmask));
7240 Node* vload_objb = _gvn.transform(new LoadVectorMaskedNode(control(), objb_adr_mem, objb_adr, objb_adr_t, vt, vmask));
7241 Node* result = _gvn.transform(new VectorCmpMaskedNode(vload_obja, vload_objb, vmask, TypeInt::INT));
7242
7243 exit_block->init_req(inline_path, control());
7244 memory_phi->init_req(inline_path, map()->memory());
7245 result_phi->init_req(inline_path, result);
7246
7247 C->set_max_vector_size(MAX2((uint)ArrayOperationPartialInlineSize, C->max_vector_size()));
7248 clear_upper_avx();
7249 }
7250 }
7251 }
7252
7253 if (call_stub_path != nullptr) {
7254 set_control(call_stub_path);
7255
7256 Node* call = make_runtime_call(RC_LEAF,
7257 OptoRuntime::vectorizedMismatch_Type(),
7258 StubRoutines::vectorizedMismatch(), "vectorizedMismatch", TypePtr::BOTTOM,
7259 obja_adr, objb_adr, length, scale);
7260
7261 exit_block->init_req(stub_path, control());
7262 memory_phi->init_req(stub_path, map()->memory());
7263 result_phi->init_req(stub_path, _gvn.transform(new ProjNode(call, TypeFunc::Parms)));
7264 }
7265
7266 exit_block = _gvn.transform(exit_block);
7267 memory_phi = _gvn.transform(memory_phi);
7268 result_phi = _gvn.transform(result_phi);
7269
7270 record_for_igvn(exit_block);
7271 record_for_igvn(memory_phi);
7272 record_for_igvn(result_phi);
7273
7274 set_control(exit_block);
7275 set_all_memory(memory_phi);
7276 set_result(result_phi);
7277
7278 return true;
7279 }
7280
7281 //------------------------------inline_vectorizedHashCode----------------------------
7282 bool LibraryCallKit::inline_vectorizedHashCode() {
7283 assert(UseVectorizedHashCodeIntrinsic, "not implemented on this platform");
7284
7285 assert(callee()->signature()->size() == 5, "vectorizedHashCode has 5 parameters");
7286 Node* array = argument(0);
7287 Node* offset = argument(1);
7288 Node* length = argument(2);
7289 Node* initialValue = argument(3);
7290 Node* basic_type = argument(4);
7291
7292 if (basic_type == top()) {
7293 return false; // failed input validation
7294 }
7295
7296 const TypeInt* basic_type_t = _gvn.type(basic_type)->is_int();
7297 if (!basic_type_t->is_con()) {
7298     return false; // Only intrinsify if the basic_type argument is constant
7299 }
7300
7301 array = must_be_not_null(array, true);
7302
7303 BasicType bt = (BasicType)basic_type_t->get_con();
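  // The node computes the usual 31-based polynomial hash over the array slice:
  //   result = initialValue * 31^length + sum(a[i] * 31^(length - 1 - i))
  // vectorized for the element type selected by 'basic_type'.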
7304
7305 // Resolve address of first element
7306 Node* array_start = array_element_address(array, offset, bt);
7307
7308 set_result(_gvn.transform(new VectorizedHashCodeNode(control(), memory(TypeAryPtr::get_array_body_type(bt)),
7309 array_start, length, initialValue, basic_type)));
7310 clear_upper_avx();
7311
7312 return true;
7313 }
7314
7315 /**
7316 * Calculate CRC32 for byte.
7317 * int java.util.zip.CRC32.update(int crc, int b)
7318 */
7319 bool LibraryCallKit::inline_updateCRC32() {
7320 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
7321 assert(callee()->signature()->size() == 2, "update has 2 parameters");
7322   // no receiver since it is a static method
7323 Node* crc = argument(0); // type: int
7324 Node* b = argument(1); // type: int
7325
7326 /*
7327 * int c = ~ crc;
7328 * b = timesXtoThe32[(b ^ c) & 0xFF];
7329 * b = b ^ (c >>> 8);
7330 * crc = ~b;
7331 */
7332
7333 Node* M1 = intcon(-1);
7334 crc = _gvn.transform(new XorINode(crc, M1));
7335 Node* result = _gvn.transform(new XorINode(crc, b));
7336 result = _gvn.transform(new AndINode(result, intcon(0xFF)));
7337
7338 Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
7339 Node* offset = _gvn.transform(new LShiftINode(result, intcon(0x2)));
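  // crc_table holds 32-bit entries, so the index is scaled by 4 (<< 2 above)
  // to form a byte offset into the table.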
7340 Node* adr = off_heap_plus_addr(base, ConvI2X(offset));
7341 result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
7342
7343 crc = _gvn.transform(new URShiftINode(crc, intcon(8)));
7344 result = _gvn.transform(new XorINode(crc, result));
7345 result = _gvn.transform(new XorINode(result, M1));
7346 set_result(result);
7347 return true;
7348 }
7349
7350 /**
7351 * Calculate CRC32 for byte[] array.
7352 * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len)
7353 */
7354 bool LibraryCallKit::inline_updateBytesCRC32() {
7355 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
7356 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
7357   // no receiver since it is a static method
7358 Node* crc = argument(0); // type: int
7359 Node* src = argument(1); // type: oop
7360 Node* offset = argument(2); // type: int
7361 Node* length = argument(3); // type: int
7362
7363 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
7364 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
7365 // failed array check
7366 return false;
7367 }
7368
7369 // Figure out the size and type of the elements we will be copying.
7370 BasicType src_elem = src_type->elem()->array_element_basic_type();
7371 if (src_elem != T_BYTE) {
7372 return false;
7373 }
7374
7375 // 'src_start' points to src array + scaled offset
7376 src = must_be_not_null(src, true);
7377 Node* src_start = array_element_address(src, offset, src_elem);
7378
7379   // We assume that the range check is done by the caller.
7380 // TODO: generate range check (offset+length < src.length) in debug VM.
7381
7382 // Call the stub.
7383 address stubAddr = StubRoutines::updateBytesCRC32();
7384 const char *stubName = "updateBytesCRC32";
7385
7386 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
7387 stubAddr, stubName, TypePtr::BOTTOM,
7388 crc, src_start, length);
7389 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
7390 set_result(result);
7391 return true;
7392 }
7393
7394 /**
7395 * Calculate CRC32 for ByteBuffer.
7396 * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
7397 */
7398 bool LibraryCallKit::inline_updateByteBufferCRC32() {
7399 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
7400 assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
7401   // no receiver since it is a static method
7402 Node* crc = argument(0); // type: int
7403 Node* src = argument(1); // type: long
7404 Node* offset = argument(3); // type: int
7405 Node* length = argument(4); // type: int
7406
7407 src = ConvL2X(src); // adjust Java long to machine word
7408 Node* base = _gvn.transform(new CastX2PNode(src));
7409 offset = ConvI2X(offset);
7410
7411 // 'src_start' points to src array + scaled offset
7412 Node* src_start = off_heap_plus_addr(base, offset);
7413
7414 // Call the stub.
7415 address stubAddr = StubRoutines::updateBytesCRC32();
7416 const char *stubName = "updateBytesCRC32";
7417
7418 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
7419 stubAddr, stubName, TypePtr::BOTTOM,
7420 crc, src_start, length);
7421 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
7422 set_result(result);
7423 return true;
7424 }
7425
7426 //------------------------------get_table_from_crc32c_class-----------------------
7427 Node * LibraryCallKit::get_table_from_crc32c_class(ciInstanceKlass *crc32c_class) {
7428 Node* table = load_field_from_object(nullptr, "byteTable", "[I", /*decorators*/ IN_HEAP, /*is_static*/ true, crc32c_class);
7429   assert(table != nullptr, "wrong version of java.util.zip.CRC32C");
7430
7431 return table;
7432 }
7433
7434 //------------------------------inline_updateBytesCRC32C-----------------------
7435 //
7436 // Calculate CRC32C for byte[] array.
7437 // int java.util.zip.CRC32C.updateBytes(int crc, byte[] buf, int off, int end)
7438 //
7439 bool LibraryCallKit::inline_updateBytesCRC32C() {
7440 assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
7441 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
7442 assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
7443 // no receiver since it is a static method
7444 Node* crc = argument(0); // type: int
7445 Node* src = argument(1); // type: oop
7446 Node* offset = argument(2); // type: int
7447 Node* end = argument(3); // type: int
7448
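  // Unlike CRC32.updateBytes, CRC32C.updateBytes takes an exclusive end index
  // rather than a length, so derive the length here.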
7449 Node* length = _gvn.transform(new SubINode(end, offset));
7450
7451 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
7452 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
7453 // failed array check
7454 return false;
7455 }
7456
7457 // Figure out the size and type of the elements we will be copying.
7458 BasicType src_elem = src_type->elem()->array_element_basic_type();
7459 if (src_elem != T_BYTE) {
7460 return false;
7461 }
7462
7463 // 'src_start' points to src array + scaled offset
7464 src = must_be_not_null(src, true);
7465 Node* src_start = array_element_address(src, offset, src_elem);
7466
7467 // static final int[] byteTable in class CRC32C
7468 Node* table = get_table_from_crc32c_class(callee()->holder());
7469 table = must_be_not_null(table, true);
7470 Node* table_start = array_element_address(table, intcon(0), T_INT);
7471
7472   // We assume that the range check is done by the caller.
7473 // TODO: generate range check (offset+length < src.length) in debug VM.
7474
7475 // Call the stub.
7476 address stubAddr = StubRoutines::updateBytesCRC32C();
7477 const char *stubName = "updateBytesCRC32C";
7478
7479 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
7480 stubAddr, stubName, TypePtr::BOTTOM,
7481 crc, src_start, length, table_start);
7482 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
7483 set_result(result);
7484 return true;
7485 }
7486
7487 //------------------------------inline_updateDirectByteBufferCRC32C-----------------------
7488 //
7489 // Calculate CRC32C for DirectByteBuffer.
7490 // int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
7491 //
7492 bool LibraryCallKit::inline_updateDirectByteBufferCRC32C() {
7493 assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
7494 assert(callee()->signature()->size() == 5, "updateDirectByteBuffer has 4 parameters and one is long");
7495 assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
7496 // no receiver since it is a static method
7497 Node* crc = argument(0); // type: int
7498 Node* src = argument(1); // type: long
7499 Node* offset = argument(3); // type: int
7500 Node* end = argument(4); // type: int
7501
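  // As with updateBytesCRC32C, derive the length from the exclusive end index.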
7502 Node* length = _gvn.transform(new SubINode(end, offset));
7503
7504 src = ConvL2X(src); // adjust Java long to machine word
7505 Node* base = _gvn.transform(new CastX2PNode(src));
7506 offset = ConvI2X(offset);
7507
7508 // 'src_start' points to src array + scaled offset
7509 Node* src_start = off_heap_plus_addr(base, offset);
7510
7511 // static final int[] byteTable in class CRC32C
7512 Node* table = get_table_from_crc32c_class(callee()->holder());
7513 table = must_be_not_null(table, true);
7514 Node* table_start = array_element_address(table, intcon(0), T_INT);
7515
7516 // Call the stub.
7517 address stubAddr = StubRoutines::updateBytesCRC32C();
7518 const char *stubName = "updateBytesCRC32C";
7519
7520 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
7521 stubAddr, stubName, TypePtr::BOTTOM,
7522 crc, src_start, length, table_start);
7523 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
7524 set_result(result);
7525 return true;
7526 }
7527
7528 //------------------------------inline_updateBytesAdler32----------------------
7529 //
7530 // Calculate Adler32 checksum for byte[] array.
7531 // int java.util.zip.Adler32.updateBytes(int crc, byte[] buf, int off, int len)
7532 //
7533 bool LibraryCallKit::inline_updateBytesAdler32() {
7534   assert(UseAdler32Intrinsics, "Adler32 intrinsic support needed"); // check whether we actually need this flag or a different one
7535 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
7536 assert(callee()->holder()->is_loaded(), "Adler32 class must be loaded");
7537   // no receiver since it is a static method
7538 Node* crc = argument(0); // type: int
7539 Node* src = argument(1); // type: oop
7540 Node* offset = argument(2); // type: int
7541 Node* length = argument(3); // type: int
7542
7543 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
7544 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
7545 // failed array check
7546 return false;
7547 }
7548
7549 // Figure out the size and type of the elements we will be copying.
7550 BasicType src_elem = src_type->elem()->array_element_basic_type();
7551 if (src_elem != T_BYTE) {
7552 return false;
7553 }
7554
7555 // 'src_start' points to src array + scaled offset
7556 Node* src_start = array_element_address(src, offset, src_elem);
7557
7558   // We assume that the range check is done by the caller.
7559 // TODO: generate range check (offset+length < src.length) in debug VM.
7560
7561 // Call the stub.
7562 address stubAddr = StubRoutines::updateBytesAdler32();
7563 const char *stubName = "updateBytesAdler32";
7564
7565 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
7566 stubAddr, stubName, TypePtr::BOTTOM,
7567 crc, src_start, length);
7568 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
7569 set_result(result);
7570 return true;
7571 }
7572
7573 //------------------------------inline_updateByteBufferAdler32---------------
7574 //
7575 // Calculate Adler32 checksum for DirectByteBuffer.
7576 // int java.util.zip.Adler32.updateByteBuffer(int crc, long buf, int off, int len)
7577 //
7578 bool LibraryCallKit::inline_updateByteBufferAdler32() {
7579   assert(UseAdler32Intrinsics, "Adler32 intrinsic support needed"); // check whether we actually need this flag or a different one
7580 assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
7581 assert(callee()->holder()->is_loaded(), "Adler32 class must be loaded");
7582   // no receiver since it is a static method
7583 Node* crc = argument(0); // type: int
7584 Node* src = argument(1); // type: long
7585 Node* offset = argument(3); // type: int
7586 Node* length = argument(4); // type: int
7587
7588 src = ConvL2X(src); // adjust Java long to machine word
7589 Node* base = _gvn.transform(new CastX2PNode(src));
7590 offset = ConvI2X(offset);
7591
7592 // 'src_start' points to src array + scaled offset
7593 Node* src_start = off_heap_plus_addr(base, offset);
7594
7595 // Call the stub.
7596 address stubAddr = StubRoutines::updateBytesAdler32();
7597 const char *stubName = "updateBytesAdler32";
7598
7599 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
7600 stubAddr, stubName, TypePtr::BOTTOM,
7601 crc, src_start, length);
7602
7603 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
7604 set_result(result);
7605 return true;
7606 }
7607
7608 //----------------------------inline_reference_get0----------------------------
7609 // public T java.lang.ref.Reference.get();
7610 bool LibraryCallKit::inline_reference_get0() {
7611 const int referent_offset = java_lang_ref_Reference::referent_offset();
7612
7613 // Get the argument:
7614 Node* reference_obj = null_check_receiver();
7615 if (stopped()) return true;
7616
7617 DecoratorSet decorators = IN_HEAP | ON_WEAK_OOP_REF;
7618 Node* result = load_field_from_object(reference_obj, "referent", "Ljava/lang/Object;",
7619 decorators, /*is_static*/ false,
7620 env()->Reference_klass());
7621 if (result == nullptr) return false;
7622
7623 // Add memory barrier to prevent commoning reads from this field
7624 // across safepoint since GC can change its value.
7625 insert_mem_bar(Op_MemBarCPUOrder);
7626
7627 set_result(result);
7628 return true;
7629 }
7630
7631 //----------------------------inline_reference_refersTo0----------------------------
7632 // bool java.lang.ref.Reference.refersTo0();
7633 // bool java.lang.ref.PhantomReference.refersTo0();
7634 bool LibraryCallKit::inline_reference_refersTo0(bool is_phantom) {
7635 // Get arguments:
7636 Node* reference_obj = null_check_receiver();
7637 Node* other_obj = argument(1);
7638 if (stopped()) return true;
7639
7640 DecoratorSet decorators = IN_HEAP | AS_NO_KEEPALIVE;
7641 decorators |= (is_phantom ? ON_PHANTOM_OOP_REF : ON_WEAK_OOP_REF);
7642 Node* referent = load_field_from_object(reference_obj, "referent", "Ljava/lang/Object;",
7643 decorators, /*is_static*/ false,
7644 env()->Reference_klass());
7645 if (referent == nullptr) return false;
7646
7647 // Add memory barrier to prevent commoning reads from this field
7648 // across safepoint since GC can change its value.
7649 insert_mem_bar(Op_MemBarCPUOrder);
7650
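  // The rest of this method implements `referent == other_obj` as a 0/1 int
  // result; the AS_NO_KEEPALIVE load above keeps the comparison from extending
  // the referent's reachability.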
7651 Node* cmp = _gvn.transform(new CmpPNode(referent, other_obj));
7652 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
7653 IfNode* if_node = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
7654
7655 RegionNode* region = new RegionNode(3);
7656 PhiNode* phi = new PhiNode(region, TypeInt::BOOL);
7657
7658 Node* if_true = _gvn.transform(new IfTrueNode(if_node));
7659 region->init_req(1, if_true);
7660 phi->init_req(1, intcon(1));
7661
7662 Node* if_false = _gvn.transform(new IfFalseNode(if_node));
7663 region->init_req(2, if_false);
7664 phi->init_req(2, intcon(0));
7665
7666 set_control(_gvn.transform(region));
7667 record_for_igvn(region);
7668 set_result(_gvn.transform(phi));
7669 return true;
7670 }
7671
7672 //----------------------------inline_reference_clear0----------------------------
7673 // void java.lang.ref.Reference.clear0();
7674 // void java.lang.ref.PhantomReference.clear0();
7675 bool LibraryCallKit::inline_reference_clear0(bool is_phantom) {
7676 // This matches the implementation in JVM_ReferenceClear, see the comments there.
7677
7678 // Get arguments
7679 Node* reference_obj = null_check_receiver();
7680 if (stopped()) return true;
7681
7682 // Common access parameters
7683 DecoratorSet decorators = IN_HEAP | AS_NO_KEEPALIVE;
7684 decorators |= (is_phantom ? ON_PHANTOM_OOP_REF : ON_WEAK_OOP_REF);
7685 Node* referent_field_addr = basic_plus_adr(reference_obj, java_lang_ref_Reference::referent_offset());
7686 const TypePtr* referent_field_addr_type = _gvn.type(referent_field_addr)->isa_ptr();
7687 const Type* val_type = TypeOopPtr::make_from_klass(env()->Object_klass());
7688
7689 Node* referent = access_load_at(reference_obj,
7690 referent_field_addr,
7691 referent_field_addr_type,
7692 val_type,
7693 T_OBJECT,
7694 decorators);
7695
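  // Equivalent pseudocode, with all accesses performed AS_NO_KEEPALIVE:
  //   if (this.referent != null) this.referent = null;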
7696 IdealKit ideal(this);
7697 #define __ ideal.
7698 __ if_then(referent, BoolTest::ne, null());
7699 sync_kit(ideal);
7700 access_store_at(reference_obj,
7701 referent_field_addr,
7702 referent_field_addr_type,
7703 null(),
7704 val_type,
7705 T_OBJECT,
7706 decorators);
7707 __ sync_kit(this);
7708 __ end_if();
7709 final_sync(ideal);
7710 #undef __
7711
7712 return true;
7713 }
7714
7715 //-----------------------inline_reference_reachabilityFence-----------------
7716 // public static void java.lang.ref.Reference.reachabilityFence(Object ref);
7717 bool LibraryCallKit::inline_reference_reachabilityFence() {
7718 Node* referent = argument(0);
7719 insert_reachability_fence(referent);
7720 return true;
7721 }
7722
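// Load the named field either from 'fromObj' or, when 'is_static' is true, from
// the java mirror of 'fromKls'. Returns null if the field cannot be resolved.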
7723 Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString,
7724 DecoratorSet decorators, bool is_static,
7725 ciInstanceKlass* fromKls) {
7726 if (fromKls == nullptr) {
7727 const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
7728 assert(tinst != nullptr, "obj is null");
7729 assert(tinst->is_loaded(), "obj is not loaded");
7730 fromKls = tinst->instance_klass();
7731 }
7732 ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
7733 ciSymbol::make(fieldTypeString),
7734 is_static);
7735
7736 assert(field != nullptr, "undefined field %s %s %s", fieldTypeString, fromKls->name()->as_utf8(), fieldName);
7737   if (field == nullptr) return nullptr;
7738
7739 if (is_static) {
7740 const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
7741 fromObj = makecon(tip);
7742 }
7743
7744 // Next code copied from Parse::do_get_xxx():
7745
7746 // Compute address and memory type.
7747 int offset = field->offset_in_bytes();
7748 bool is_vol = field->is_volatile();
7749 ciType* field_klass = field->type();
7750 assert(field_klass->is_loaded(), "should be loaded");
7751 const TypePtr* adr_type = C->alias_type(field)->adr_type();
7752 Node *adr = basic_plus_adr(fromObj, fromObj, offset);
7753 assert(C->get_alias_index(adr_type) == C->get_alias_index(_gvn.type(adr)->isa_ptr()),
7754 "slice of address and input slice don't match");
7755 BasicType bt = field->layout_type();
7756
7757 // Build the resultant type of the load
7758 const Type *type;
7759 if (bt == T_OBJECT) {
7760 type = TypeOopPtr::make_from_klass(field_klass->as_klass());
7761 } else {
7762 type = Type::get_const_basic_type(bt);
7763 }
7764
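  // Volatile loads require sequentially consistent memory ordering.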
7765 if (is_vol) {
7766 decorators |= MO_SEQ_CST;
7767 }
7768
7769 return access_load_at(fromObj, adr, adr_type, type, bt, decorators);
7770 }
7771
7772 Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
7773 bool is_exact /* true */, bool is_static /* false */,
7774 ciInstanceKlass * fromKls /* nullptr */) {
7775 if (fromKls == nullptr) {
7776 const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
7777 assert(tinst != nullptr, "obj is null");
7778 assert(tinst->is_loaded(), "obj is not loaded");
7779 assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
7780 fromKls = tinst->instance_klass();
  } else {
7783 assert(is_static, "only for static field access");
7784 }
7785 ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
7786 ciSymbol::make(fieldTypeString),
7787 is_static);
7788
7789 assert(field != nullptr, "undefined field");
7790 assert(!field->is_volatile(), "not defined for volatile fields");
7791
7792 if (is_static) {
7793 const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
7794 fromObj = makecon(tip);
7795 }
7796
7797 // Next code copied from Parse::do_get_xxx():
7798
7799 // Compute address and memory type.
7800 int offset = field->offset_in_bytes();
7801 Node *adr = basic_plus_adr(fromObj, fromObj, offset);
7802
7803 return adr;
7804 }
7805
7806 //------------------------------inline_aescrypt_Block-----------------------
7807 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
7808 address stubAddr = nullptr;
7809 const char *stubName;
7810 bool is_decrypt = false;
7811 assert(UseAES, "need AES instruction support");
7812
7813 switch(id) {
7814 case vmIntrinsics::_aescrypt_encryptBlock:
7815 stubAddr = StubRoutines::aescrypt_encryptBlock();
7816 stubName = "aescrypt_encryptBlock";
7817 break;
7818 case vmIntrinsics::_aescrypt_decryptBlock:
7819 stubAddr = StubRoutines::aescrypt_decryptBlock();
7820 stubName = "aescrypt_decryptBlock";
7821 is_decrypt = true;
7822 break;
7823 default:
7824 break;
7825 }
7826 if (stubAddr == nullptr) return false;
7827
7828 Node* aescrypt_object = argument(0);
7829 Node* src = argument(1);
7830 Node* src_offset = argument(2);
7831 Node* dest = argument(3);
7832 Node* dest_offset = argument(4);
7833
7834 src = must_be_not_null(src, true);
7835 dest = must_be_not_null(dest, true);
7836
7837 // (1) src and dest are arrays.
7838 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
7839 const TypeAryPtr* dest_type = dest->Value(&_gvn)->isa_aryptr();
7840 assert( src_type != nullptr && src_type->elem() != Type::BOTTOM &&
7841 dest_type != nullptr && dest_type->elem() != Type::BOTTOM, "args are strange");
7842
7843 // for the quick and dirty code we will skip all the checks.
7844 // we are just trying to get the call to be generated.
7845 Node* src_start = src;
7846 Node* dest_start = dest;
7847 if (src_offset != nullptr || dest_offset != nullptr) {
7848 assert(src_offset != nullptr && dest_offset != nullptr, "");
7849 src_start = array_element_address(src, src_offset, T_BYTE);
7850 dest_start = array_element_address(dest, dest_offset, T_BYTE);
7851 }
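  // src_start/dest_start now point at the first byte the stub should touch.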
7852
7853 // now need to get the start of its expanded key array
7854 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
7855 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, is_decrypt);
7856 if (k_start == nullptr) return false;
7857
7858 // Call the stub.
7859 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
7860 stubAddr, stubName, TypePtr::BOTTOM,
7861 src_start, dest_start, k_start);
7862
7863 return true;
7864 }
7865
7866 //------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
7867 bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
7868 address stubAddr = nullptr;
7869 const char *stubName = nullptr;
7870 bool is_decrypt = false;
7871 assert(UseAES, "need AES instruction support");
7872
7873 switch(id) {
7874 case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
7875 stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
7876 stubName = "cipherBlockChaining_encryptAESCrypt";
7877 break;
7878 case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
7879 stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
7880 stubName = "cipherBlockChaining_decryptAESCrypt";
7881 is_decrypt = true;
7882 break;
7883 default:
7884 break;
7885 }
7886 if (stubAddr == nullptr) return false;
7887
7888 Node* cipherBlockChaining_object = argument(0);
7889 Node* src = argument(1);
7890 Node* src_offset = argument(2);
7891 Node* len = argument(3);
7892 Node* dest = argument(4);
7893 Node* dest_offset = argument(5);
7894
7895 src = must_be_not_null(src, false);
7896 dest = must_be_not_null(dest, false);
7897
7898 // (1) src and dest are arrays.
7899 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
7900 const TypeAryPtr* dest_type = dest->Value(&_gvn)->isa_aryptr();
7901 assert( src_type != nullptr && src_type->elem() != Type::BOTTOM &&
7902 dest_type != nullptr && dest_type->elem() != Type::BOTTOM, "args are strange");
7903
7904 // checks are the responsibility of the caller
7905 Node* src_start = src;
7906 Node* dest_start = dest;
7907 if (src_offset != nullptr || dest_offset != nullptr) {
7908 assert(src_offset != nullptr && dest_offset != nullptr, "");
7909 src_start = array_element_address(src, src_offset, T_BYTE);
7910 dest_start = array_element_address(dest, dest_offset, T_BYTE);
7911 }
7912
7913 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
7914 // (because of the predicated logic executed earlier).
7915 // so we cast it here safely.
7916 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
7917
7918 Node* embeddedCipherObj = load_field_from_object(cipherBlockChaining_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
7919 if (embeddedCipherObj == nullptr) return false;
7920
7921 // cast it to what we know it will be at runtime
7922 const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
7923 assert(tinst != nullptr, "CBC obj is null");
7924 assert(tinst->is_loaded(), "CBC obj is not loaded");
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
7926 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
7927
7928 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
7929 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
7930 const TypeOopPtr* xtype = aklass->as_instance_type()->cast_to_ptr_type(TypePtr::NotNull);
7931 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
7932 aescrypt_object = _gvn.transform(aescrypt_object);
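  // The CheckCastPP sharpens embeddedCipher to the exact AESCrypt type proven
  // by the predicate, so get_key_start_from_aescrypt_object can find its fields.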
7933
7934 // we need to get the start of the aescrypt_object's expanded key array
7935 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, is_decrypt);
7936 if (k_start == nullptr) return false;
7937
7938 // similarly, get the start address of the r vector
7939 Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B");
7940 if (objRvec == nullptr) return false;
7941 Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
7942
7943 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
7944 Node* cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
7945 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
7946 stubAddr, stubName, TypePtr::BOTTOM,
7947 src_start, dest_start, k_start, r_start, len);
7948
7949 // return cipher length (int)
7950 Node* retvalue = _gvn.transform(new ProjNode(cbcCrypt, TypeFunc::Parms));
7951 set_result(retvalue);
7952 return true;
7953 }
7954
7955 //------------------------------inline_electronicCodeBook_AESCrypt-----------------------
7956 bool LibraryCallKit::inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id) {
7957 address stubAddr = nullptr;
7958 const char *stubName = nullptr;
7959 bool is_decrypt = false;
7960 assert(UseAES, "need AES instruction support");
7961
7962 switch (id) {
7963 case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
7964 stubAddr = StubRoutines::electronicCodeBook_encryptAESCrypt();
7965 stubName = "electronicCodeBook_encryptAESCrypt";
7966 break;
7967 case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
7968 stubAddr = StubRoutines::electronicCodeBook_decryptAESCrypt();
7969 stubName = "electronicCodeBook_decryptAESCrypt";
7970 is_decrypt = true;
7971 break;
7972 default:
7973 break;
7974 }
7975
7976 if (stubAddr == nullptr) return false;
7977
7978 Node* electronicCodeBook_object = argument(0);
7979 Node* src = argument(1);
7980 Node* src_offset = argument(2);
7981 Node* len = argument(3);
7982 Node* dest = argument(4);
7983 Node* dest_offset = argument(5);
7984
7985 // (1) src and dest are arrays.
7986 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
7987 const TypeAryPtr* dest_type = dest->Value(&_gvn)->isa_aryptr();
7988 assert( src_type != nullptr && src_type->elem() != Type::BOTTOM &&
7989 dest_type != nullptr && dest_type->elem() != Type::BOTTOM, "args are strange");
7990
7991 // checks are the responsibility of the caller
7992 Node* src_start = src;
7993 Node* dest_start = dest;
7994 if (src_offset != nullptr || dest_offset != nullptr) {
7995 assert(src_offset != nullptr && dest_offset != nullptr, "");
7996 src_start = array_element_address(src, src_offset, T_BYTE);
7997 dest_start = array_element_address(dest, dest_offset, T_BYTE);
7998 }
7999
8000 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
8001 // (because of the predicated logic executed earlier).
8002 // so we cast it here safely.
8003 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
8004
8005 Node* embeddedCipherObj = load_field_from_object(electronicCodeBook_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
8006 if (embeddedCipherObj == nullptr) return false;
8007
8008 // cast it to what we know it will be at runtime
8009 const TypeInstPtr* tinst = _gvn.type(electronicCodeBook_object)->isa_instptr();
8010 assert(tinst != nullptr, "ECB obj is null");
8011 assert(tinst->is_loaded(), "ECB obj is not loaded");
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
8013 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
8014
8015 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
8016 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
8017 const TypeOopPtr* xtype = aklass->as_instance_type()->cast_to_ptr_type(TypePtr::NotNull);
8018 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
8019 aescrypt_object = _gvn.transform(aescrypt_object);
8020
8021 // we need to get the start of the aescrypt_object's expanded key array
8022 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, is_decrypt);
8023 if (k_start == nullptr) return false;
8024
  // Call the stub, passing src_start, dest_start, k_start and src_len
8026 Node* ecbCrypt = make_runtime_call(RC_LEAF | RC_NO_FP,
8027 OptoRuntime::electronicCodeBook_aescrypt_Type(),
8028 stubAddr, stubName, TypePtr::BOTTOM,
8029 src_start, dest_start, k_start, len);
8030
8031 // return cipher length (int)
8032 Node* retvalue = _gvn.transform(new ProjNode(ecbCrypt, TypeFunc::Parms));
8033 set_result(retvalue);
8034 return true;
8035 }
8036
8037 //------------------------------inline_counterMode_AESCrypt-----------------------
8038 bool LibraryCallKit::inline_counterMode_AESCrypt(vmIntrinsics::ID id) {
8039 assert(UseAES, "need AES instruction support");
8040 if (!UseAESCTRIntrinsics) return false;
8041
8042 address stubAddr = nullptr;
8043 const char *stubName = nullptr;
8044 if (id == vmIntrinsics::_counterMode_AESCrypt) {
8045 stubAddr = StubRoutines::counterMode_AESCrypt();
8046 stubName = "counterMode_AESCrypt";
8047 }
8048 if (stubAddr == nullptr) return false;
8049
8050 Node* counterMode_object = argument(0);
8051 Node* src = argument(1);
8052 Node* src_offset = argument(2);
8053 Node* len = argument(3);
8054 Node* dest = argument(4);
8055 Node* dest_offset = argument(5);
8056
8057 // (1) src and dest are arrays.
8058 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
8059 const TypeAryPtr* dest_type = dest->Value(&_gvn)->isa_aryptr();
8060 assert( src_type != nullptr && src_type->elem() != Type::BOTTOM &&
8061 dest_type != nullptr && dest_type->elem() != Type::BOTTOM, "args are strange");
8062
8063 // checks are the responsibility of the caller
8064 Node* src_start = src;
8065 Node* dest_start = dest;
8066 if (src_offset != nullptr || dest_offset != nullptr) {
8067 assert(src_offset != nullptr && dest_offset != nullptr, "");
8068 src_start = array_element_address(src, src_offset, T_BYTE);
8069 dest_start = array_element_address(dest, dest_offset, T_BYTE);
8070 }
8071
8072 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
8073 // (because of the predicated logic executed earlier).
8074 // so we cast it here safely.
8075 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
8076 Node* embeddedCipherObj = load_field_from_object(counterMode_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
8077 if (embeddedCipherObj == nullptr) return false;
8078 // cast it to what we know it will be at runtime
8079 const TypeInstPtr* tinst = _gvn.type(counterMode_object)->isa_instptr();
8080 assert(tinst != nullptr, "CTR obj is null");
8081 assert(tinst->is_loaded(), "CTR obj is not loaded");
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
8083 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
8084 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
8085 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
8086 const TypeOopPtr* xtype = aklass->as_instance_type()->cast_to_ptr_type(TypePtr::NotNull);
8087 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
8088 aescrypt_object = _gvn.transform(aescrypt_object);
8089 // we need to get the start of the aescrypt_object's expanded key array
8090 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, /* is_decrypt */ false);
8091 if (k_start == nullptr) return false;
  // similarly, get the start address of the counter block
8093 Node* obj_counter = load_field_from_object(counterMode_object, "counter", "[B");
8094 if (obj_counter == nullptr) return false;
8095 Node* cnt_start = array_element_address(obj_counter, intcon(0), T_BYTE);
8096
8097 Node* saved_encCounter = load_field_from_object(counterMode_object, "encryptedCounter", "[B");
8098 if (saved_encCounter == nullptr) return false;
8099 Node* saved_encCounter_start = array_element_address(saved_encCounter, intcon(0), T_BYTE);
8100 Node* used = field_address_from_object(counterMode_object, "used", "I", /*is_exact*/ false);
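  // 'used' is passed as a raw field address so the stub can update how much of
  // the current encrypted counter block has already been consumed.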
8101
  // Call the stub, passing src_start, dest_start, k_start, cnt_start, len, saved_encCounter_start and used
8103 Node* ctrCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
8104 OptoRuntime::counterMode_aescrypt_Type(),
8105 stubAddr, stubName, TypePtr::BOTTOM,
8106 src_start, dest_start, k_start, cnt_start, len, saved_encCounter_start, used);
8107
8108 // return cipher length (int)
8109 Node* retvalue = _gvn.transform(new ProjNode(ctrCrypt, TypeFunc::Parms));
8110 set_result(retvalue);
8111 return true;
8112 }
8113
8114 //------------------------------get_key_start_from_aescrypt_object-----------------------
8115 Node* LibraryCallKit::get_key_start_from_aescrypt_object(Node* aescrypt_object, bool is_decrypt) {
8116 // MixColumns for decryption can be reduced by preprocessing MixColumns with round keys.
8117 // Intel's extension is based on this optimization and AESCrypt generates round keys by preprocessing MixColumns.
8118 // However, ppc64 vncipher processes MixColumns and requires the same round keys with encryption.
8119 // The following platform specific stubs of encryption and decryption use the same round keys.
8120 #if defined(PPC64) || defined(S390) || defined(RISCV64)
8121 bool use_decryption_key = false;
8122 #else
8123 bool use_decryption_key = is_decrypt;
8124 #endif
8125 Node* objAESCryptKey = load_field_from_object(aescrypt_object, use_decryption_key ? "sessionKd" : "sessionKe", "[I");
  assert(objAESCryptKey != nullptr, "wrong version of com.sun.crypto.provider.AESCrypt");
8127 if (objAESCryptKey == nullptr) return (Node *) nullptr;
8128
8129 // now have the array, need to get the start address of the selected key array
8130 Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
8131 return k_start;
8132 }
8133
8134 //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
8135 // Return node representing slow path of predicate check.
8136 // the pseudo code we want to emulate with this predicate is:
8137 // for encryption:
8138 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
8139 // for decryption:
8140 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
8141 // note cipher==plain is more conservative than the original java code but that's OK
8142 //
8143 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
8144 // The receiver was checked for null already.
8145 Node* objCBC = argument(0);
8146
8147 Node* src = argument(1);
8148 Node* dest = argument(4);
8149
8150 // Load embeddedCipher field of CipherBlockChaining object.
8151 Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
8152
8153 // get AESCrypt klass for instanceOf check
8154 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
8155 // will have same classloader as CipherBlockChaining object
8156 const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr();
8157 assert(tinst != nullptr, "CBCobj is null");
8158 assert(tinst->is_loaded(), "CBCobj is not loaded");
8159
8160 // we want to do an instanceof comparison against the AESCrypt class
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
8162 if (!klass_AESCrypt->is_loaded()) {
8163 // if AESCrypt is not even loaded, we never take the intrinsic fast path
8164 Node* ctrl = control();
8165 set_control(top()); // no regular fast path
8166 return ctrl;
8167 }
8168
8169 src = must_be_not_null(src, true);
8170 dest = must_be_not_null(dest, true);
8171
8172 // Resolve oops to stable for CmpP below.
8173 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
8174
8175 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
8176 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
8177 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
8178
8179 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
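  // The guard is taken when embeddedCipher is not an AESCrypt; that projection
  // becomes the slow path, while control() continues on the fast path.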
8180
8181 // for encryption, we are done
8182 if (!decrypting)
8183 return instof_false; // even if it is null
8184
8185 // for decryption, we need to add a further check to avoid
8186 // taking the intrinsic path when cipher and plain are the same
8187 // see the original java code for why.
8188 RegionNode* region = new RegionNode(3);
8189 region->init_req(1, instof_false);
8190
8191 Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));
8192 Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq));
8193 Node* src_dest_conjoint = generate_guard(bool_src_dest, nullptr, PROB_MIN);
8194 region->init_req(2, src_dest_conjoint);
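  // Merge the two slow-path reasons: wrong cipher type (req 1) or
  // src == dest (req 2).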
8195
8196 record_for_igvn(region);
8197 return _gvn.transform(region);
8198 }
8199
8200 //----------------------------inline_electronicCodeBook_AESCrypt_predicate----------------------------
8201 // Return node representing slow path of predicate check.
8202 // the pseudo code we want to emulate with this predicate is:
8203 // for encryption:
8204 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
8205 // for decryption:
8206 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
8207 // note cipher==plain is more conservative than the original java code but that's OK
8208 //
8209 Node* LibraryCallKit::inline_electronicCodeBook_AESCrypt_predicate(bool decrypting) {
8210 // The receiver was checked for null already.
8211 Node* objECB = argument(0);
8212
8213 // Load embeddedCipher field of ElectronicCodeBook object.
8214 Node* embeddedCipherObj = load_field_from_object(objECB, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
8215
8216 // get AESCrypt klass for instanceOf check
8217 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
8218 // will have same classloader as ElectronicCodeBook object
8219 const TypeInstPtr* tinst = _gvn.type(objECB)->isa_instptr();
8220 assert(tinst != nullptr, "ECBobj is null");
8221 assert(tinst->is_loaded(), "ECBobj is not loaded");
8222
8223 // we want to do an instanceof comparison against the AESCrypt class
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
8225 if (!klass_AESCrypt->is_loaded()) {
8226 // if AESCrypt is not even loaded, we never take the intrinsic fast path
8227 Node* ctrl = control();
8228 set_control(top()); // no regular fast path
8229 return ctrl;
8230 }
8231 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
8232
8233 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
8234 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
8235 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
8236
8237 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
8238
8239 // for encryption, we are done
8240 if (!decrypting)
8241 return instof_false; // even if it is null
8242
8243 // for decryption, we need to add a further check to avoid
8244 // taking the intrinsic path when cipher and plain are the same
8245 // see the original java code for why.
8246 RegionNode* region = new RegionNode(3);
8247 region->init_req(1, instof_false);
8248 Node* src = argument(1);
8249 Node* dest = argument(4);
8250 Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));
8251 Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq));
8252 Node* src_dest_conjoint = generate_guard(bool_src_dest, nullptr, PROB_MIN);
8253 region->init_req(2, src_dest_conjoint);
8254
8255 record_for_igvn(region);
8256 return _gvn.transform(region);
8257 }
8258
8259 //----------------------------inline_counterMode_AESCrypt_predicate----------------------------
8260 // Return node representing slow path of predicate check.
8261 // the pseudo code we want to emulate with this predicate is:
8262 // for encryption:
8263 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
8264 // for decryption:
8265 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
8266 // note cipher==plain is more conservative than the original java code but that's OK
8267 //
8268
8269 Node* LibraryCallKit::inline_counterMode_AESCrypt_predicate() {
8270 // The receiver was checked for null already.
8271 Node* objCTR = argument(0);
8272
  // Load embeddedCipher field of CounterMode object.
8274 Node* embeddedCipherObj = load_field_from_object(objCTR, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
8275
8276 // get AESCrypt klass for instanceOf check
8277 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
  // will have same classloader as CounterMode object
8279 const TypeInstPtr* tinst = _gvn.type(objCTR)->isa_instptr();
8280 assert(tinst != nullptr, "CTRobj is null");
8281 assert(tinst->is_loaded(), "CTRobj is not loaded");
8282
8283 // we want to do an instanceof comparison against the AESCrypt class
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
8285 if (!klass_AESCrypt->is_loaded()) {
8286 // if AESCrypt is not even loaded, we never take the intrinsic fast path
8287 Node* ctrl = control();
8288 set_control(top()); // no regular fast path
8289 return ctrl;
8290 }
8291
8292 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
8293 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
8294 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
8295 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
8296 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
8297
8298 return instof_false; // even if it is null
8299 }
8300
8301 //------------------------------inline_ghash_processBlocks
8302 bool LibraryCallKit::inline_ghash_processBlocks() {
8303 address stubAddr;
8304 const char *stubName;
8305 assert(UseGHASHIntrinsics, "need GHASH intrinsics support");
8306
8307 stubAddr = StubRoutines::ghash_processBlocks();
8308 stubName = "ghash_processBlocks";
8309
8310 Node* data = argument(0);
8311 Node* offset = argument(1);
8312 Node* len = argument(2);
8313 Node* state = argument(3);
8314 Node* subkeyH = argument(4);
8315
8316 state = must_be_not_null(state, true);
8317 subkeyH = must_be_not_null(subkeyH, true);
8318 data = must_be_not_null(data, true);
8319
8320 Node* state_start = array_element_address(state, intcon(0), T_LONG);
8321 assert(state_start, "state is null");
8322 Node* subkeyH_start = array_element_address(subkeyH, intcon(0), T_LONG);
8323 assert(subkeyH_start, "subkeyH is null");
8324 Node* data_start = array_element_address(data, offset, T_BYTE);
8325 assert(data_start, "data is null");
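  // Assuming the sun.security.provider.GHASH layout: 'state' and 'subkeyH'
  // are two-element long[]s (128-bit values) and 'len' counts 16-byte blocks.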
8326
8327 Node* ghash = make_runtime_call(RC_LEAF|RC_NO_FP,
8328 OptoRuntime::ghash_processBlocks_Type(),
8329 stubAddr, stubName, TypePtr::BOTTOM,
8330 state_start, subkeyH_start, data_start, len);
8331 return true;
8332 }
8333
8334 //------------------------------inline_chacha20Block
8335 bool LibraryCallKit::inline_chacha20Block() {
8336 address stubAddr;
8337 const char *stubName;
8338 assert(UseChaCha20Intrinsics, "need ChaCha20 intrinsics support");
8339
8340 stubAddr = StubRoutines::chacha20Block();
8341 stubName = "chacha20Block";
8342
8343 Node* state = argument(0);
8344 Node* result = argument(1);
8345
8346 state = must_be_not_null(state, true);
8347 result = must_be_not_null(result, true);
8348
8349 Node* state_start = array_element_address(state, intcon(0), T_INT);
8350 assert(state_start, "state is null");
8351 Node* result_start = array_element_address(result, intcon(0), T_BYTE);
8352 assert(result_start, "result is null");
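  // The stub expands the 16-int ChaCha20 state into key-stream bytes in
  // 'result'; its int return value is the number of bytes produced.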
8353
8354 Node* cc20Blk = make_runtime_call(RC_LEAF|RC_NO_FP,
8355 OptoRuntime::chacha20Block_Type(),
8356 stubAddr, stubName, TypePtr::BOTTOM,
8357 state_start, result_start);
8358 // return key stream length (int)
8359 Node* retvalue = _gvn.transform(new ProjNode(cc20Blk, TypeFunc::Parms));
8360 set_result(retvalue);
8361 return true;
8362 }
8363
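// The Kyber (ML-KEM) intrinsics below all follow the same shape: null-check
// the short[] coefficient arrays, take element-0 addresses, and call a leaf
// stub that returns an int.
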
8364 //------------------------------inline_kyberNtt
8365 bool LibraryCallKit::inline_kyberNtt() {
8366 address stubAddr;
8367 const char *stubName;
8368 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
8369 assert(callee()->signature()->size() == 2, "kyberNtt has 2 parameters");
8370
8371 stubAddr = StubRoutines::kyberNtt();
8372 stubName = "kyberNtt";
8373 if (!stubAddr) return false;
8374
8375 Node* coeffs = argument(0);
8376 Node* ntt_zetas = argument(1);
8377
8378 coeffs = must_be_not_null(coeffs, true);
8379 ntt_zetas = must_be_not_null(ntt_zetas, true);
8380
8381 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_SHORT);
8382 assert(coeffs_start, "coeffs is null");
8383 Node* ntt_zetas_start = array_element_address(ntt_zetas, intcon(0), T_SHORT);
8384 assert(ntt_zetas_start, "ntt_zetas is null");
8385 Node* kyberNtt = make_runtime_call(RC_LEAF|RC_NO_FP,
8386 OptoRuntime::kyberNtt_Type(),
8387 stubAddr, stubName, TypePtr::BOTTOM,
8388 coeffs_start, ntt_zetas_start);
8389 // return an int
8390 Node* retvalue = _gvn.transform(new ProjNode(kyberNtt, TypeFunc::Parms));
8391 set_result(retvalue);
8392 return true;
8393 }
8394
8395 //------------------------------inline_kyberInverseNtt
8396 bool LibraryCallKit::inline_kyberInverseNtt() {
8397 address stubAddr;
8398 const char *stubName;
8399 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
8400 assert(callee()->signature()->size() == 2, "kyberInverseNtt has 2 parameters");
8401
8402 stubAddr = StubRoutines::kyberInverseNtt();
8403 stubName = "kyberInverseNtt";
8404 if (!stubAddr) return false;
8405
8406 Node* coeffs = argument(0);
8407 Node* zetas = argument(1);
8408
8409 coeffs = must_be_not_null(coeffs, true);
8410 zetas = must_be_not_null(zetas, true);
8411
8412 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_SHORT);
8413 assert(coeffs_start, "coeffs is null");
8414 Node* zetas_start = array_element_address(zetas, intcon(0), T_SHORT);
8415 assert(zetas_start, "inverseNtt_zetas is null");
8416 Node* kyberInverseNtt = make_runtime_call(RC_LEAF|RC_NO_FP,
8417 OptoRuntime::kyberInverseNtt_Type(),
8418 stubAddr, stubName, TypePtr::BOTTOM,
8419 coeffs_start, zetas_start);
8420
8421 // return an int
8422 Node* retvalue = _gvn.transform(new ProjNode(kyberInverseNtt, TypeFunc::Parms));
8423 set_result(retvalue);
8424 return true;
8425 }
8426
8427 //------------------------------inline_kyberNttMult
8428 bool LibraryCallKit::inline_kyberNttMult() {
8429 address stubAddr;
8430 const char *stubName;
8431 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
8432 assert(callee()->signature()->size() == 4, "kyberNttMult has 4 parameters");
8433
8434 stubAddr = StubRoutines::kyberNttMult();
8435 stubName = "kyberNttMult";
8436 if (!stubAddr) return false;
8437
8438 Node* result = argument(0);
8439 Node* ntta = argument(1);
8440 Node* nttb = argument(2);
8441 Node* zetas = argument(3);
8442
8443 result = must_be_not_null(result, true);
8444 ntta = must_be_not_null(ntta, true);
8445 nttb = must_be_not_null(nttb, true);
8446 zetas = must_be_not_null(zetas, true);
8447
8448 Node* result_start = array_element_address(result, intcon(0), T_SHORT);
8449 assert(result_start, "result is null");
8450 Node* ntta_start = array_element_address(ntta, intcon(0), T_SHORT);
8451 assert(ntta_start, "ntta is null");
8452 Node* nttb_start = array_element_address(nttb, intcon(0), T_SHORT);
8453 assert(nttb_start, "nttb is null");
8454 Node* zetas_start = array_element_address(zetas, intcon(0), T_SHORT);
8455 assert(zetas_start, "nttMult_zetas is null");
8456 Node* kyberNttMult = make_runtime_call(RC_LEAF|RC_NO_FP,
8457 OptoRuntime::kyberNttMult_Type(),
8458 stubAddr, stubName, TypePtr::BOTTOM,
8459 result_start, ntta_start, nttb_start,
8460 zetas_start);
8461
8462 // return an int
8463 Node* retvalue = _gvn.transform(new ProjNode(kyberNttMult, TypeFunc::Parms));
8464 set_result(retvalue);
8465
8466 return true;
8467 }
8468
8469 //------------------------------inline_kyberAddPoly_2
8470 bool LibraryCallKit::inline_kyberAddPoly_2() {
8471 address stubAddr;
8472 const char *stubName;
8473 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
8474 assert(callee()->signature()->size() == 3, "kyberAddPoly_2 has 3 parameters");
8475
8476 stubAddr = StubRoutines::kyberAddPoly_2();
8477 stubName = "kyberAddPoly_2";
8478 if (!stubAddr) return false;
8479
8480 Node* result = argument(0);
8481 Node* a = argument(1);
8482 Node* b = argument(2);
8483
8484 result = must_be_not_null(result, true);
8485 a = must_be_not_null(a, true);
8486 b = must_be_not_null(b, true);
8487
8488 Node* result_start = array_element_address(result, intcon(0), T_SHORT);
8489 assert(result_start, "result is null");
8490 Node* a_start = array_element_address(a, intcon(0), T_SHORT);
8491 assert(a_start, "a is null");
8492 Node* b_start = array_element_address(b, intcon(0), T_SHORT);
8493 assert(b_start, "b is null");
8494 Node* kyberAddPoly_2 = make_runtime_call(RC_LEAF|RC_NO_FP,
8495 OptoRuntime::kyberAddPoly_2_Type(),
8496 stubAddr, stubName, TypePtr::BOTTOM,
8497 result_start, a_start, b_start);
8498 // return an int
8499 Node* retvalue = _gvn.transform(new ProjNode(kyberAddPoly_2, TypeFunc::Parms));
8500 set_result(retvalue);
8501 return true;
8502 }
8503
8504 //------------------------------inline_kyberAddPoly_3
8505 bool LibraryCallKit::inline_kyberAddPoly_3() {
8506 address stubAddr;
8507 const char *stubName;
8508 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
8509 assert(callee()->signature()->size() == 4, "kyberAddPoly_3 has 4 parameters");
8510
8511 stubAddr = StubRoutines::kyberAddPoly_3();
8512 stubName = "kyberAddPoly_3";
8513 if (!stubAddr) return false;
8514
8515 Node* result = argument(0);
8516 Node* a = argument(1);
8517 Node* b = argument(2);
8518 Node* c = argument(3);
8519
8520 result = must_be_not_null(result, true);
8521 a = must_be_not_null(a, true);
8522 b = must_be_not_null(b, true);
8523 c = must_be_not_null(c, true);
8524
8525 Node* result_start = array_element_address(result, intcon(0), T_SHORT);
8526 assert(result_start, "result is null");
8527 Node* a_start = array_element_address(a, intcon(0), T_SHORT);
8528 assert(a_start, "a is null");
8529 Node* b_start = array_element_address(b, intcon(0), T_SHORT);
8530 assert(b_start, "b is null");
8531 Node* c_start = array_element_address(c, intcon(0), T_SHORT);
8532 assert(c_start, "c is null");
8533 Node* kyberAddPoly_3 = make_runtime_call(RC_LEAF|RC_NO_FP,
8534 OptoRuntime::kyberAddPoly_3_Type(),
8535 stubAddr, stubName, TypePtr::BOTTOM,
8536 result_start, a_start, b_start, c_start);
8537 // return an int
8538 Node* retvalue = _gvn.transform(new ProjNode(kyberAddPoly_3, TypeFunc::Parms));
8539 set_result(retvalue);
8540 return true;
8541 }
8542
8543 //------------------------------inline_kyber12To16
8544 bool LibraryCallKit::inline_kyber12To16() {
8545 address stubAddr;
8546 const char *stubName;
8547 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
8548 assert(callee()->signature()->size() == 4, "kyber12To16 has 4 parameters");
8549
8550 stubAddr = StubRoutines::kyber12To16();
8551 stubName = "kyber12To16";
8552 if (!stubAddr) return false;
8553
8554 Node* condensed = argument(0);
8555 Node* condensedOffs = argument(1);
8556 Node* parsed = argument(2);
8557 Node* parsedLength = argument(3);
8558
8559 condensed = must_be_not_null(condensed, true);
8560 parsed = must_be_not_null(parsed, true);
8561
8562 Node* condensed_start = array_element_address(condensed, intcon(0), T_BYTE);
8563 assert(condensed_start, "condensed is null");
8564 Node* parsed_start = array_element_address(parsed, intcon(0), T_SHORT);
8565 assert(parsed_start, "parsed is null");
8566 Node* kyber12To16 = make_runtime_call(RC_LEAF|RC_NO_FP,
8567 OptoRuntime::kyber12To16_Type(),
8568 stubAddr, stubName, TypePtr::BOTTOM,
8569 condensed_start, condensedOffs, parsed_start, parsedLength);
8570 // return an int
8571 Node* retvalue = _gvn.transform(new ProjNode(kyber12To16, TypeFunc::Parms));
8572 set_result(retvalue);
  return true;
}
8576
8577 //------------------------------inline_kyberBarrettReduce
8578 bool LibraryCallKit::inline_kyberBarrettReduce() {
8579 address stubAddr;
8580 const char *stubName;
8581 assert(UseKyberIntrinsics, "need Kyber intrinsics support");
  assert(callee()->signature()->size() == 1, "kyberBarrettReduce has 1 parameter");
8583
8584 stubAddr = StubRoutines::kyberBarrettReduce();
8585 stubName = "kyberBarrettReduce";
8586 if (!stubAddr) return false;
8587
8588 Node* coeffs = argument(0);
8589
8590 coeffs = must_be_not_null(coeffs, true);
8591
8592 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_SHORT);
8593 assert(coeffs_start, "coeffs is null");
8594 Node* kyberBarrettReduce = make_runtime_call(RC_LEAF|RC_NO_FP,
8595 OptoRuntime::kyberBarrettReduce_Type(),
8596 stubAddr, stubName, TypePtr::BOTTOM,
8597 coeffs_start);
8598 // return an int
8599 Node* retvalue = _gvn.transform(new ProjNode(kyberBarrettReduce, TypeFunc::Parms));
8600 set_result(retvalue);
8601 return true;
8602 }
8603
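// The Dilithium (ML-DSA) intrinsics below mirror the Kyber ones, but operate
// on int[] coefficient arrays.
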
8604 //------------------------------inline_dilithiumAlmostNtt
8605 bool LibraryCallKit::inline_dilithiumAlmostNtt() {
8606 address stubAddr;
8607 const char *stubName;
8608 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
8609 assert(callee()->signature()->size() == 2, "dilithiumAlmostNtt has 2 parameters");
8610
8611 stubAddr = StubRoutines::dilithiumAlmostNtt();
8612 stubName = "dilithiumAlmostNtt";
8613 if (!stubAddr) return false;
8614
8615 Node* coeffs = argument(0);
8616 Node* ntt_zetas = argument(1);
8617
8618 coeffs = must_be_not_null(coeffs, true);
8619 ntt_zetas = must_be_not_null(ntt_zetas, true);
8620
8621 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_INT);
8622 assert(coeffs_start, "coeffs is null");
8623 Node* ntt_zetas_start = array_element_address(ntt_zetas, intcon(0), T_INT);
8624 assert(ntt_zetas_start, "ntt_zetas is null");
8625 Node* dilithiumAlmostNtt = make_runtime_call(RC_LEAF|RC_NO_FP,
8626 OptoRuntime::dilithiumAlmostNtt_Type(),
8627 stubAddr, stubName, TypePtr::BOTTOM,
8628 coeffs_start, ntt_zetas_start);
8629 // return an int
8630 Node* retvalue = _gvn.transform(new ProjNode(dilithiumAlmostNtt, TypeFunc::Parms));
8631 set_result(retvalue);
8632 return true;
8633 }
8634
8635 //------------------------------inline_dilithiumAlmostInverseNtt
8636 bool LibraryCallKit::inline_dilithiumAlmostInverseNtt() {
8637 address stubAddr;
8638 const char *stubName;
8639 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
8640 assert(callee()->signature()->size() == 2, "dilithiumAlmostInverseNtt has 2 parameters");
8641
8642 stubAddr = StubRoutines::dilithiumAlmostInverseNtt();
8643 stubName = "dilithiumAlmostInverseNtt";
8644 if (!stubAddr) return false;
8645
8646 Node* coeffs = argument(0);
8647 Node* zetas = argument(1);
8648
8649 coeffs = must_be_not_null(coeffs, true);
8650 zetas = must_be_not_null(zetas, true);
8651
8652 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_INT);
8653 assert(coeffs_start, "coeffs is null");
8654 Node* zetas_start = array_element_address(zetas, intcon(0), T_INT);
8655 assert(zetas_start, "inverseNtt_zetas is null");
8656 Node* dilithiumAlmostInverseNtt = make_runtime_call(RC_LEAF|RC_NO_FP,
8657 OptoRuntime::dilithiumAlmostInverseNtt_Type(),
8658 stubAddr, stubName, TypePtr::BOTTOM,
8659 coeffs_start, zetas_start);
8660 // return an int
8661 Node* retvalue = _gvn.transform(new ProjNode(dilithiumAlmostInverseNtt, TypeFunc::Parms));
8662 set_result(retvalue);
8663 return true;
8664 }
8665
8666 //------------------------------inline_dilithiumNttMult
8667 bool LibraryCallKit::inline_dilithiumNttMult() {
8668 address stubAddr;
8669 const char *stubName;
8670 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
8671 assert(callee()->signature()->size() == 3, "dilithiumNttMult has 3 parameters");
8672
8673 stubAddr = StubRoutines::dilithiumNttMult();
8674 stubName = "dilithiumNttMult";
8675 if (!stubAddr) return false;
8676
  Node* result = argument(0);
  Node* ntta = argument(1);
  Node* nttb = argument(2);

  result = must_be_not_null(result, true);
  ntta = must_be_not_null(ntta, true);
  nttb = must_be_not_null(nttb, true);
8686
8687 Node* result_start = array_element_address(result, intcon(0), T_INT);
8688 assert(result_start, "result is null");
8689 Node* ntta_start = array_element_address(ntta, intcon(0), T_INT);
8690 assert(ntta_start, "ntta is null");
8691 Node* nttb_start = array_element_address(nttb, intcon(0), T_INT);
8692 assert(nttb_start, "nttb is null");
8693 Node* dilithiumNttMult = make_runtime_call(RC_LEAF|RC_NO_FP,
8694 OptoRuntime::dilithiumNttMult_Type(),
8695 stubAddr, stubName, TypePtr::BOTTOM,
8696 result_start, ntta_start, nttb_start);
8697
8698 // return an int
8699 Node* retvalue = _gvn.transform(new ProjNode(dilithiumNttMult, TypeFunc::Parms));
8700 set_result(retvalue);
8701
8702 return true;
8703 }
8704
8705 //------------------------------inline_dilithiumMontMulByConstant
8706 bool LibraryCallKit::inline_dilithiumMontMulByConstant() {
8707 address stubAddr;
8708 const char *stubName;
8709 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
8710 assert(callee()->signature()->size() == 2, "dilithiumMontMulByConstant has 2 parameters");
8711
8712 stubAddr = StubRoutines::dilithiumMontMulByConstant();
8713 stubName = "dilithiumMontMulByConstant";
8714 if (!stubAddr) return false;
8715
8716 Node* coeffs = argument(0);
8717 Node* constant = argument(1);
8718
8719 coeffs = must_be_not_null(coeffs, true);
8720
8721 Node* coeffs_start = array_element_address(coeffs, intcon(0), T_INT);
8722 assert(coeffs_start, "coeffs is null");
8723 Node* dilithiumMontMulByConstant = make_runtime_call(RC_LEAF|RC_NO_FP,
8724 OptoRuntime::dilithiumMontMulByConstant_Type(),
8725 stubAddr, stubName, TypePtr::BOTTOM,
8726 coeffs_start, constant);
8727
8728 // return an int
8729 Node* retvalue = _gvn.transform(new ProjNode(dilithiumMontMulByConstant, TypeFunc::Parms));
8730 set_result(retvalue);
8731 return true;
8732 }
8733
8735 //------------------------------inline_dilithiumDecomposePoly
8736 bool LibraryCallKit::inline_dilithiumDecomposePoly() {
8737 address stubAddr;
8738 const char *stubName;
8739 assert(UseDilithiumIntrinsics, "need Dilithium intrinsics support");
8740 assert(callee()->signature()->size() == 5, "dilithiumDecomposePoly has 5 parameters");
8741
8742 stubAddr = StubRoutines::dilithiumDecomposePoly();
8743 stubName = "dilithiumDecomposePoly";
8744 if (!stubAddr) return false;
8745
8746 Node* input = argument(0);
8747 Node* lowPart = argument(1);
8748 Node* highPart = argument(2);
8749 Node* twoGamma2 = argument(3);
8750 Node* multiplier = argument(4);
8751
8752 input = must_be_not_null(input, true);
8753 lowPart = must_be_not_null(lowPart, true);
8754 highPart = must_be_not_null(highPart, true);
8755
8756 Node* input_start = array_element_address(input, intcon(0), T_INT);
8757 assert(input_start, "input is null");
8758 Node* lowPart_start = array_element_address(lowPart, intcon(0), T_INT);
8759 assert(lowPart_start, "lowPart is null");
8760 Node* highPart_start = array_element_address(highPart, intcon(0), T_INT);
8761 assert(highPart_start, "highPart is null");
8762
8763 Node* dilithiumDecomposePoly = make_runtime_call(RC_LEAF|RC_NO_FP,
8764 OptoRuntime::dilithiumDecomposePoly_Type(),
8765 stubAddr, stubName, TypePtr::BOTTOM,
8766 input_start, lowPart_start, highPart_start,
8767 twoGamma2, multiplier);
8768
8769 // return an int
8770 Node* retvalue = _gvn.transform(new ProjNode(dilithiumDecomposePoly, TypeFunc::Parms));
8771 set_result(retvalue);
8772 return true;
8773 }
8774
8775 bool LibraryCallKit::inline_base64_encodeBlock() {
8776 address stubAddr;
8777 const char *stubName;
8778 assert(UseBASE64Intrinsics, "need Base64 intrinsics support");
8779 assert(callee()->signature()->size() == 6, "base64_encodeBlock has 6 parameters");
8780 stubAddr = StubRoutines::base64_encodeBlock();
8781 stubName = "encodeBlock";
8782
8783 if (!stubAddr) return false;
8784 Node* base64obj = argument(0);
8785 Node* src = argument(1);
8786 Node* offset = argument(2);
8787 Node* len = argument(3);
8788 Node* dest = argument(4);
8789 Node* dp = argument(5);
8790 Node* isURL = argument(6);
8791
8792 src = must_be_not_null(src, true);
8793 dest = must_be_not_null(dest, true);
8794
8795 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
8796 assert(src_start, "source array is null");
8797 Node* dest_start = array_element_address(dest, intcon(0), T_BYTE);
8798 assert(dest_start, "destination array is null");
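  // The stub gets the raw array bases plus the Java-level offsets and applies
  // 'offset' and 'dp' itself.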
8799
8800 Node* base64 = make_runtime_call(RC_LEAF,
8801 OptoRuntime::base64_encodeBlock_Type(),
8802 stubAddr, stubName, TypePtr::BOTTOM,
8803 src_start, offset, len, dest_start, dp, isURL);
8804 return true;
8805 }
8806
8807 bool LibraryCallKit::inline_base64_decodeBlock() {
8808 address stubAddr;
8809 const char *stubName;
8810 assert(UseBASE64Intrinsics, "need Base64 intrinsics support");
8811 assert(callee()->signature()->size() == 7, "base64_decodeBlock has 7 parameters");
8812 stubAddr = StubRoutines::base64_decodeBlock();
8813 stubName = "decodeBlock";
8814
8815 if (!stubAddr) return false;
8816 Node* base64obj = argument(0);
8817 Node* src = argument(1);
8818 Node* src_offset = argument(2);
8819 Node* len = argument(3);
8820 Node* dest = argument(4);
8821 Node* dest_offset = argument(5);
8822 Node* isURL = argument(6);
8823 Node* isMIME = argument(7);
8824
8825 src = must_be_not_null(src, true);
8826 dest = must_be_not_null(dest, true);
8827
8828 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
8829 assert(src_start, "source array is null");
8830 Node* dest_start = array_element_address(dest, intcon(0), T_BYTE);
8831 assert(dest_start, "destination array is null");
8832
8833 Node* call = make_runtime_call(RC_LEAF,
8834 OptoRuntime::base64_decodeBlock_Type(),
8835 stubAddr, stubName, TypePtr::BOTTOM,
8836 src_start, src_offset, len, dest_start, dest_offset, isURL, isMIME);
8837 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
8838 set_result(result);
8839 return true;
8840 }
8841
8842 bool LibraryCallKit::inline_poly1305_processBlocks() {
8843 address stubAddr;
8844 const char *stubName;
8845 assert(UsePoly1305Intrinsics, "need Poly intrinsics support");
8846 assert(callee()->signature()->size() == 5, "poly1305_processBlocks has %d parameters", callee()->signature()->size());
8847 stubAddr = StubRoutines::poly1305_processBlocks();
8848 stubName = "poly1305_processBlocks";
8849
8850 if (!stubAddr) return false;
8851 null_check_receiver(); // null-check receiver
8852 if (stopped()) return true;
8853
8854 Node* input = argument(1);
8855 Node* input_offset = argument(2);
8856 Node* len = argument(3);
8857 Node* alimbs = argument(4);
8858 Node* rlimbs = argument(5);
8859
8860 input = must_be_not_null(input, true);
8861 alimbs = must_be_not_null(alimbs, true);
8862 rlimbs = must_be_not_null(rlimbs, true);
8863
8864 Node* input_start = array_element_address(input, input_offset, T_BYTE);
8865 assert(input_start, "input array is null");
8866 Node* acc_start = array_element_address(alimbs, intcon(0), T_LONG);
8867 assert(acc_start, "acc array is null");
8868 Node* r_start = array_element_address(rlimbs, intcon(0), T_LONG);
8869 assert(r_start, "r array is null");
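  // Assumption on the caller's layout: alimbs/rlimbs hold the accumulator and
  // the key 'r' as long limbs (IntegerPolynomial1305 form); the stub consumes
  // 'len' bytes of input starting at input_start.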
8870
8871 Node* call = make_runtime_call(RC_LEAF | RC_NO_FP,
8872 OptoRuntime::poly1305_processBlocks_Type(),
8873 stubAddr, stubName, TypePtr::BOTTOM,
8874 input_start, len, acc_start, r_start);
8875 return true;
8876 }
8877
8878 bool LibraryCallKit::inline_intpoly_montgomeryMult_P256() {
8879 address stubAddr;
8880 const char *stubName;
8881 assert(UseIntPolyIntrinsics, "need intpoly intrinsics support");
8882 assert(callee()->signature()->size() == 3, "intpoly_montgomeryMult_P256 has %d parameters", callee()->signature()->size());
8883 stubAddr = StubRoutines::intpoly_montgomeryMult_P256();
8884 stubName = "intpoly_montgomeryMult_P256";
8885
8886 if (!stubAddr) return false;
8887 null_check_receiver(); // null-check receiver
8888 if (stopped()) return true;
8889
8890 Node* a = argument(1);
8891 Node* b = argument(2);
8892 Node* r = argument(3);
8893
8894 a = must_be_not_null(a, true);
8895 b = must_be_not_null(b, true);
8896 r = must_be_not_null(r, true);
8897
8898 Node* a_start = array_element_address(a, intcon(0), T_LONG);
8899 assert(a_start, "a array is null");
8900 Node* b_start = array_element_address(b, intcon(0), T_LONG);
8901 assert(b_start, "b array is null");
8902 Node* r_start = array_element_address(r, intcon(0), T_LONG);
8903 assert(r_start, "r array is null");
8904
8905 Node* call = make_runtime_call(RC_LEAF | RC_NO_FP,
8906 OptoRuntime::intpoly_montgomeryMult_P256_Type(),
8907 stubAddr, stubName, TypePtr::BOTTOM,
8908 a_start, b_start, r_start);
8909 return true;
8910 }
8911
8912 bool LibraryCallKit::inline_intpoly_assign() {
8913 assert(UseIntPolyIntrinsics, "need intpoly intrinsics support");
8914 assert(callee()->signature()->size() == 3, "intpoly_assign has %d parameters", callee()->signature()->size());
8915 const char *stubName = "intpoly_assign";
8916 address stubAddr = StubRoutines::intpoly_assign();
8917 if (!stubAddr) return false;
8918
8919 Node* set = argument(0);
8920 Node* a = argument(1);
8921 Node* b = argument(2);
8922 Node* arr_length = load_array_length(a);
8923
8924 a = must_be_not_null(a, true);
8925 b = must_be_not_null(b, true);
8926
8927 Node* a_start = array_element_address(a, intcon(0), T_LONG);
8928 assert(a_start, "a array is null");
8929 Node* b_start = array_element_address(b, intcon(0), T_LONG);
8930 assert(b_start, "b array is null");
8931
8932 Node* call = make_runtime_call(RC_LEAF | RC_NO_FP,
8933 OptoRuntime::intpoly_assign_Type(),
8934 stubAddr, stubName, TypePtr::BOTTOM,
8935 set, a_start, b_start, arr_length);
8936 return true;
8937 }
8938
8939 //------------------------------inline_digestBase_implCompress-----------------------
8940 //
8941 // Calculate MD5 for single-block byte[] array.
8942 // void com.sun.security.provider.MD5.implCompress(byte[] buf, int ofs)
8943 //
8944 // Calculate SHA (i.e., SHA-1) for single-block byte[] array.
8945 // void com.sun.security.provider.SHA.implCompress(byte[] buf, int ofs)
8946 //
// Calculate SHA2 (i.e., SHA-224 or SHA-256) for single-block byte[] array.
8948 // void com.sun.security.provider.SHA2.implCompress(byte[] buf, int ofs)
8949 //
8950 // Calculate SHA5 (i.e., SHA-384 or SHA-512) for single-block byte[] array.
8951 // void com.sun.security.provider.SHA5.implCompress(byte[] buf, int ofs)
8952 //
8953 // Calculate SHA3 (i.e., SHA3-224 or SHA3-256 or SHA3-384 or SHA3-512) for single-block byte[] array.
8954 // void com.sun.security.provider.SHA3.implCompress(byte[] buf, int ofs)
8955 //
8956 bool LibraryCallKit::inline_digestBase_implCompress(vmIntrinsics::ID id) {
8957 assert(callee()->signature()->size() == 2, "sha_implCompress has 2 parameters");
8958
8959 Node* digestBase_obj = argument(0);
8960 Node* src = argument(1); // type oop
8961 Node* ofs = argument(2); // type int
8962
8963 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
8964 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
8965 // failed array check
8966 return false;
8967 }
8968 // Figure out the size and type of the elements we will be copying.
8969 BasicType src_elem = src_type->elem()->array_element_basic_type();
8970 if (src_elem != T_BYTE) {
8971 return false;
8972 }
8973 // 'src_start' points to src array + offset
8974 src = must_be_not_null(src, true);
8975 Node* src_start = array_element_address(src, ofs, src_elem);
8976 Node* state = nullptr;
8977 Node* block_size = nullptr;
8978 address stubAddr;
8979 const char *stubName;
8980
8981 switch(id) {
8982 case vmIntrinsics::_md5_implCompress:
8983 assert(UseMD5Intrinsics, "need MD5 instruction support");
8984 state = get_state_from_digest_object(digestBase_obj, T_INT);
8985 stubAddr = StubRoutines::md5_implCompress();
8986 stubName = "md5_implCompress";
8987 break;
8988 case vmIntrinsics::_sha_implCompress:
8989 assert(UseSHA1Intrinsics, "need SHA1 instruction support");
8990 state = get_state_from_digest_object(digestBase_obj, T_INT);
8991 stubAddr = StubRoutines::sha1_implCompress();
8992 stubName = "sha1_implCompress";
8993 break;
8994 case vmIntrinsics::_sha2_implCompress:
8995 assert(UseSHA256Intrinsics, "need SHA256 instruction support");
8996 state = get_state_from_digest_object(digestBase_obj, T_INT);
8997 stubAddr = StubRoutines::sha256_implCompress();
8998 stubName = "sha256_implCompress";
8999 break;
9000 case vmIntrinsics::_sha5_implCompress:
9001 assert(UseSHA512Intrinsics, "need SHA512 instruction support");
9002 state = get_state_from_digest_object(digestBase_obj, T_LONG);
9003 stubAddr = StubRoutines::sha512_implCompress();
9004 stubName = "sha512_implCompress";
9005 break;
9006 case vmIntrinsics::_sha3_implCompress:
9007 assert(UseSHA3Intrinsics, "need SHA3 instruction support");
9008 state = get_state_from_digest_object(digestBase_obj, T_LONG);
9009 stubAddr = StubRoutines::sha3_implCompress();
9010 stubName = "sha3_implCompress";
9011 block_size = get_block_size_from_digest_object(digestBase_obj);
9012 if (block_size == nullptr) return false;
9013 break;
9014 default:
9015 fatal_unexpected_iid(id);
9016 return false;
9017 }
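  // Only SHA3 needs its block size (the sponge rate) as an extra stub
  // argument; a null block_size below selects the two-argument stub signature.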
9018 if (state == nullptr) return false;
9019
9020 assert(stubAddr != nullptr, "Stub %s is not generated", stubName);
9021 if (stubAddr == nullptr) return false;
9022
9023 // Call the stub.
9024 Node* call;
9025 if (block_size == nullptr) {
9026 call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::digestBase_implCompress_Type(false),
9027 stubAddr, stubName, TypePtr::BOTTOM,
9028 src_start, state);
9029 } else {
9030 call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::digestBase_implCompress_Type(true),
9031 stubAddr, stubName, TypePtr::BOTTOM,
9032 src_start, state, block_size);
9033 }
9034
9035 return true;
9036 }
9037
9038 //------------------------------inline_double_keccak
9039 bool LibraryCallKit::inline_double_keccak() {
9040 address stubAddr;
9041 const char *stubName;
9042 assert(UseSHA3Intrinsics, "need SHA3 intrinsics support");
9043 assert(callee()->signature()->size() == 2, "double_keccak has 2 parameters");
9044
9045 stubAddr = StubRoutines::double_keccak();
9046 stubName = "double_keccak";
9047 if (!stubAddr) return false;
9048
9049 Node* status0 = argument(0);
9050 Node* status1 = argument(1);
9051
9052 status0 = must_be_not_null(status0, true);
9053 status1 = must_be_not_null(status1, true);
9054
9055 Node* status0_start = array_element_address(status0, intcon(0), T_LONG);
9056 assert(status0_start, "status0 is null");
9057 Node* status1_start = array_element_address(status1, intcon(0), T_LONG);
9058 assert(status1_start, "status1 is null");
9059 Node* double_keccak = make_runtime_call(RC_LEAF|RC_NO_FP,
9060 OptoRuntime::double_keccak_Type(),
9061 stubAddr, stubName, TypePtr::BOTTOM,
9062 status0_start, status1_start);
9063 // return an int
9064 Node* retvalue = _gvn.transform(new ProjNode(double_keccak, TypeFunc::Parms));
9065 set_result(retvalue);
9066 return true;
9067 }
9068
9070 //------------------------------inline_digestBase_implCompressMB-----------------------
9071 //
9072 // Calculate MD5/SHA/SHA2/SHA5/SHA3 for multi-block byte[] array.
9073 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
9074 //
9075 bool LibraryCallKit::inline_digestBase_implCompressMB(int predicate) {
9076 assert(UseMD5Intrinsics || UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics || UseSHA3Intrinsics,
9077 "need MD5/SHA1/SHA256/SHA512/SHA3 instruction support");
9078 assert((uint)predicate < 5, "sanity");
9079 assert(callee()->signature()->size() == 3, "digestBase_implCompressMB has 3 parameters");
9080
9081 Node* digestBase_obj = argument(0); // The receiver was checked for null already.
9082 Node* src = argument(1); // byte[] array
9083 Node* ofs = argument(2); // type int
9084 Node* limit = argument(3); // type int
9085
9086 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
9087 if (src_type == nullptr || src_type->elem() == Type::BOTTOM) {
9088 // failed array check
9089 return false;
9090 }
9091 // Figure out the size and type of the elements we will be copying.
9092 BasicType src_elem = src_type->elem()->array_element_basic_type();
9093 if (src_elem != T_BYTE) {
9094 return false;
9095 }
9096 // 'src_start' points to src array + offset
9097 src = must_be_not_null(src, false);
9098 Node* src_start = array_element_address(src, ofs, src_elem);
9099
9100 const char* klass_digestBase_name = nullptr;
9101 const char* stub_name = nullptr;
9102 address stub_addr = nullptr;
9103 BasicType elem_type = T_INT;
9104
9105 switch (predicate) {
9106 case 0:
9107 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_md5_implCompress)) {
9108 klass_digestBase_name = "sun/security/provider/MD5";
9109 stub_name = "md5_implCompressMB";
9110 stub_addr = StubRoutines::md5_implCompressMB();
9111 }
9112 break;
9113 case 1:
9114 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha_implCompress)) {
9115 klass_digestBase_name = "sun/security/provider/SHA";
9116 stub_name = "sha1_implCompressMB";
9117 stub_addr = StubRoutines::sha1_implCompressMB();
9118 }
9119 break;
9120 case 2:
9121 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha2_implCompress)) {
9122 klass_digestBase_name = "sun/security/provider/SHA2";
9123 stub_name = "sha256_implCompressMB";
9124 stub_addr = StubRoutines::sha256_implCompressMB();
9125 }
9126 break;
9127 case 3:
9128 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha5_implCompress)) {
9129 klass_digestBase_name = "sun/security/provider/SHA5";
9130 stub_name = "sha512_implCompressMB";
9131 stub_addr = StubRoutines::sha512_implCompressMB();
9132 elem_type = T_LONG;
9133 }
9134 break;
9135 case 4:
9136 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha3_implCompress)) {
9137 klass_digestBase_name = "sun/security/provider/SHA3";
9138 stub_name = "sha3_implCompressMB";
9139 stub_addr = StubRoutines::sha3_implCompressMB();
9140 elem_type = T_LONG;
9141 }
9142 break;
9143 default:
9144 fatal("unknown DigestBase intrinsic predicate: %d", predicate);
9145 }
9146 if (klass_digestBase_name != nullptr) {
9147 assert(stub_addr != nullptr, "Stub is generated");
9148 if (stub_addr == nullptr) return false;
9149
9150 // get DigestBase klass to lookup for SHA klass
9151 const TypeInstPtr* tinst = _gvn.type(digestBase_obj)->isa_instptr();
9152 assert(tinst != nullptr, "digestBase_obj is not instance???");
9153 assert(tinst->is_loaded(), "DigestBase is not loaded");
9154
9155 ciKlass* klass_digestBase = tinst->instance_klass()->find_klass(ciSymbol::make(klass_digestBase_name));
9156 assert(klass_digestBase->is_loaded(), "predicate checks that this class is loaded");
9157 ciInstanceKlass* instklass_digestBase = klass_digestBase->as_instance_klass();
9158 return inline_digestBase_implCompressMB(digestBase_obj, instklass_digestBase, elem_type, stub_addr, stub_name, src_start, ofs, limit);
9159 }
9160 return false;
9161 }
9162
9163 //------------------------------inline_digestBase_implCompressMB-----------------------
9164 bool LibraryCallKit::inline_digestBase_implCompressMB(Node* digestBase_obj, ciInstanceKlass* instklass_digestBase,
9165 BasicType elem_type, address stubAddr, const char *stubName,
9166 Node* src_start, Node* ofs, Node* limit) {
9167 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_digestBase);
9168 const TypeOopPtr* xtype = aklass->cast_to_exactness(false)->as_instance_type()->cast_to_ptr_type(TypePtr::NotNull);
9169 Node* digest_obj = new CheckCastPPNode(control(), digestBase_obj, xtype);
9170 digest_obj = _gvn.transform(digest_obj);
9171
9172 Node* state = get_state_from_digest_object(digest_obj, elem_type);
9173 if (state == nullptr) return false;
9174
9175 Node* block_size = nullptr;
9176 if (strcmp("sha3_implCompressMB", stubName) == 0) {
9177 block_size = get_block_size_from_digest_object(digest_obj);
9178 if (block_size == nullptr) return false;
9179 }
9180
9181 // Call the stub.
9182 Node* call;
9183 if (block_size == nullptr) {
9184 call = make_runtime_call(RC_LEAF|RC_NO_FP,
9185 OptoRuntime::digestBase_implCompressMB_Type(false),
9186 stubAddr, stubName, TypePtr::BOTTOM,
9187 src_start, state, ofs, limit);
9188 } else {
9189 call = make_runtime_call(RC_LEAF|RC_NO_FP,
9190 OptoRuntime::digestBase_implCompressMB_Type(true),
9191 stubAddr, stubName, TypePtr::BOTTOM,
9192 src_start, state, block_size, ofs, limit);
9193 }
9194
9195 // return ofs (int)
9196 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
9197 set_result(result);
9198
9199 return true;
9200 }
9201
9202 //------------------------------inline_galoisCounterMode_AESCrypt-----------------------
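//
// Combined AES/GCM crypt intrinsic. The Java-side helper passes
// (byte[] in, int inOfs, int len, byte[] ct, int ctOfs, byte[] out, int outOfs,
//  GCTR gctr, GHASH ghash), matching the argument() indices below.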
bool LibraryCallKit::inline_galoisCounterMode_AESCrypt() {
  assert(UseAES, "need AES instruction support");
  address stubAddr = nullptr;
  const char* stubName = nullptr;
  stubAddr = StubRoutines::galoisCounterMode_AESCrypt();
  stubName = "galoisCounterMode_AESCrypt";

  if (stubAddr == nullptr) return false;

  Node* in = argument(0);
  Node* inOfs = argument(1);
  Node* len = argument(2);
  Node* ct = argument(3);
  Node* ctOfs = argument(4);
  Node* out = argument(5);
  Node* outOfs = argument(6);
  Node* gctr_object = argument(7);
  Node* ghash_object = argument(8);

  // (1) in, ct and out are arrays.
  const TypeAryPtr* in_type = in->Value(&_gvn)->isa_aryptr();
  const TypeAryPtr* ct_type = ct->Value(&_gvn)->isa_aryptr();
  const TypeAryPtr* out_type = out->Value(&_gvn)->isa_aryptr();
  assert(in_type != nullptr && in_type->elem() != Type::BOTTOM &&
         ct_type != nullptr && ct_type->elem() != Type::BOTTOM &&
         out_type != nullptr && out_type->elem() != Type::BOTTOM, "args are strange");

  // checks are the responsibility of the caller
  Node* in_start = in;
  Node* ct_start = ct;
  Node* out_start = out;
  if (inOfs != nullptr || ctOfs != nullptr || outOfs != nullptr) {
    assert(inOfs != nullptr && ctOfs != nullptr && outOfs != nullptr, "");
    in_start = array_element_address(in, inOfs, T_BYTE);
    ct_start = array_element_address(ct, ctOfs, T_BYTE);
    out_start = array_element_address(out, outOfs, T_BYTE);
  }

  // If we are in this set of code, we "know" the embeddedCipher is an AESCrypt
  // object (because of the predicated logic executed earlier), so we cast it
  // here safely. This requires a newer class file that has this array as
  // little-endian ints; otherwise we revert to the Java path.
  Node* embeddedCipherObj = load_field_from_object(gctr_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
  Node* counter = load_field_from_object(gctr_object, "counter", "[B");
  Node* subkeyHtbl = load_field_from_object(ghash_object, "subkeyHtbl", "[J");
  Node* state = load_field_from_object(ghash_object, "state", "[J");

  if (embeddedCipherObj == nullptr || counter == nullptr || subkeyHtbl == nullptr || state == nullptr) {
    return false;
  }
  // cast it to what we know it will be at runtime
  const TypeInstPtr* tinst = _gvn.type(gctr_object)->isa_instptr();
  assert(tinst != nullptr, "GCTR obj is null");
  assert(tinst->is_loaded(), "GCTR obj is not loaded");
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
  assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
  ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
  const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
  const TypeOopPtr* xtype = aklass->as_instance_type();
  Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
  aescrypt_object = _gvn.transform(aescrypt_object);
  // we need to get the start of the aescrypt_object's expanded key array
  Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object, /* is_decrypt */ false);
  if (k_start == nullptr) return false;
  // similarly, get the start address of the r vector
  Node* cnt_start = array_element_address(counter, intcon(0), T_BYTE);
  Node* state_start = array_element_address(state, intcon(0), T_LONG);
  Node* subkeyHtbl_start = array_element_address(subkeyHtbl, intcon(0), T_LONG);

  // Call the stub, passing params
  Node* gcmCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
                                     OptoRuntime::galoisCounterMode_aescrypt_Type(),
                                     stubAddr, stubName, TypePtr::BOTTOM,
                                     in_start, len, ct_start, out_start, k_start, state_start, subkeyHtbl_start, cnt_start);

  // return cipher length (int)
  Node* retvalue = _gvn.transform(new ProjNode(gcmCrypt, TypeFunc::Parms));
  set_result(retvalue);

  return true;
}

//----------------------------inline_galoisCounterMode_AESCrypt_predicate----------------------------
// Return node representing slow path of predicate check.
// the pseudo code we want to emulate with this predicate is:
// for encryption:
//    if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
// for decryption:
//    if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
//    note cipher==plain is more conservative than the original java code but that's OK
//
Node* LibraryCallKit::inline_galoisCounterMode_AESCrypt_predicate() {
  // The receiver was checked for null already.
  Node* objGCTR = argument(7);
  // Load embeddedCipher field of GCTR object.
  Node* embeddedCipherObj = load_field_from_object(objGCTR, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
  assert(embeddedCipherObj != nullptr, "embeddedCipherObj is null");

  // get AESCrypt klass for instanceOf check
  // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
  // will have same classloader as CipherBlockChaining object
  const TypeInstPtr* tinst = _gvn.type(objGCTR)->isa_instptr();
  assert(tinst != nullptr, "GCTR obj is null");
  assert(tinst->is_loaded(), "GCTR obj is not loaded");

  // we want to do an instanceof comparison against the AESCrypt class
  ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
  if (!klass_AESCrypt->is_loaded()) {
    // if AESCrypt is not even loaded, we never take the intrinsic fast path
    Node* ctrl = control();
    set_control(top()); // no regular fast path
    return ctrl;
  }

  ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
  Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
  Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
  Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
  Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);

  return instof_false; // even if it is null
}

//------------------------------get_state_from_digest_object-----------------------
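// Loads the digest object's 'state' array field ([B, [I or [J, depending on
// the algorithm) and returns the address of its first element.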
Node* LibraryCallKit::get_state_from_digest_object(Node* digest_object, BasicType elem_type) {
  const char* state_type;
  switch (elem_type) {
  case T_BYTE: state_type = "[B"; break;
  case T_INT:  state_type = "[I"; break;
  case T_LONG: state_type = "[J"; break;
  default: ShouldNotReachHere();
  }
  Node* digest_state = load_field_from_object(digest_object, "state", state_type);
  assert(digest_state != nullptr, "wrong version of sun.security.provider.MD5/SHA/SHA2/SHA5/SHA3");
  if (digest_state == nullptr) return nullptr;

  // now have the array, need to get the start address of the state array
  Node* state = array_element_address(digest_state, intcon(0), elem_type);
  return state;
}

//------------------------------get_block_size_from_digest_object----------------------------------
Node* LibraryCallKit::get_block_size_from_digest_object(Node* digest_object) {
  Node* block_size = load_field_from_object(digest_object, "blockSize", "I");
  assert(block_size != nullptr, "sanity");
  return block_size;
}

//----------------------------inline_digestBase_implCompressMB_predicate----------------------------
// Return node representing slow path of predicate check.
// the pseudo code we want to emulate with this predicate is:
//    if (digestBaseObj instanceof MD5/SHA/SHA2/SHA5/SHA3) do_intrinsic, else do_javapath
//
Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) {
  assert(UseMD5Intrinsics || UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics || UseSHA3Intrinsics,
         "need MD5/SHA1/SHA256/SHA512/SHA3 instruction support");
  assert((uint)predicate < 5, "sanity");

  // The receiver was checked for null already.
  Node* digestBaseObj = argument(0);

  // get DigestBase klass for instanceOf check
  const TypeInstPtr* tinst = _gvn.type(digestBaseObj)->isa_instptr();
  assert(tinst != nullptr, "digestBaseObj is null");
  assert(tinst->is_loaded(), "DigestBase is not loaded");

  const char* klass_name = nullptr;
  switch (predicate) {
  case 0:
    if (UseMD5Intrinsics) {
      // we want to do an instanceof comparison against the MD5 class
      klass_name = "sun/security/provider/MD5";
    }
    break;
  case 1:
    if (UseSHA1Intrinsics) {
      // we want to do an instanceof comparison against the SHA class
      klass_name = "sun/security/provider/SHA";
    }
    break;
  case 2:
    if (UseSHA256Intrinsics) {
      // we want to do an instanceof comparison against the SHA2 class
      klass_name = "sun/security/provider/SHA2";
    }
    break;
  case 3:
    if (UseSHA512Intrinsics) {
      // we want to do an instanceof comparison against the SHA5 class
      klass_name = "sun/security/provider/SHA5";
    }
    break;
  case 4:
    if (UseSHA3Intrinsics) {
      // we want to do an instanceof comparison against the SHA3 class
      klass_name = "sun/security/provider/SHA3";
    }
    break;
  default:
    fatal("unknown DigestBase intrinsic predicate: %d", predicate);
  }

  ciKlass* klass = nullptr;
  if (klass_name != nullptr) {
    klass = tinst->instance_klass()->find_klass(ciSymbol::make(klass_name));
  }
  if ((klass == nullptr) || !klass->is_loaded()) {
    // if none of MD5/SHA/SHA2/SHA5/SHA3 is loaded, we never take the intrinsic fast path
    Node* ctrl = control();
    set_control(top()); // no intrinsic path
    return ctrl;
  }
  ciInstanceKlass* instklass = klass->as_instance_klass();

  Node* instof = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass)));
  Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
  Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
  Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);

  return instof_false; // even if it is null
}

//-------------inline_fma-----------------------------------
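// Math.fma(a, b, c) computes a * b + c as a single fused operation with one
// rounding step.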
bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) {
  Node* a = nullptr;
  Node* b = nullptr;
  Node* c = nullptr;
  Node* result = nullptr;
  switch (id) {
  case vmIntrinsics::_fmaD:
    assert(callee()->signature()->size() == 6, "fma has 3 parameters of size 2 each.");
    // no receiver since it is static method
    a = argument(0);
    b = argument(2);
    c = argument(4);
    result = _gvn.transform(new FmaDNode(a, b, c));
    break;
  case vmIntrinsics::_fmaF:
    assert(callee()->signature()->size() == 3, "fma has 3 parameters of size 1 each.");
    a = argument(0);
    b = argument(1);
    c = argument(2);
    result = _gvn.transform(new FmaFNode(a, b, c));
    break;
  default:
    fatal_unexpected_iid(id);
    break;
  }
  set_result(result);
  return true;
}

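// Character classification intrinsics (isDigit/isLowerCase/isUpperCase/
// isWhitespace): argument(1) is the code point to classify; the matching test
// node produces the int result.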
bool LibraryCallKit::inline_character_compare(vmIntrinsics::ID id) {
  // argument(0) is receiver
  Node* codePoint = argument(1);
  Node* n = nullptr;

  switch (id) {
  case vmIntrinsics::_isDigit:
    n = new DigitNode(control(), codePoint);
    break;
  case vmIntrinsics::_isLowerCase:
    n = new LowerCaseNode(control(), codePoint);
    break;
  case vmIntrinsics::_isUpperCase:
    n = new UpperCaseNode(control(), codePoint);
    break;
  case vmIntrinsics::_isWhitespace:
    n = new WhitespaceNode(control(), codePoint);
    break;
  default:
    fatal_unexpected_iid(id);
  }

  set_result(_gvn.transform(n));
  return true;
}

bool LibraryCallKit::inline_profileBoolean() {
  Node* counts = argument(1);
  const TypeAryPtr* ary = nullptr;
  ciArray* aobj = nullptr;
  if (counts->is_Con()
      && (ary = counts->bottom_type()->isa_aryptr()) != nullptr
      && (aobj = ary->const_oop()->as_array()) != nullptr
      && (aobj->length() == 2)) {
    // Profile is int[2] where [0] and [1] correspond to false and true value occurrences respectively.
    jint false_cnt = aobj->element_value(0).as_int();
    jint true_cnt = aobj->element_value(1).as_int();

    if (C->log() != nullptr) {
      C->log()->elem("observe source='profileBoolean' false='%d' true='%d'",
                     false_cnt, true_cnt);
    }

    if (false_cnt + true_cnt == 0) {
      // According to profile, never executed.
      uncommon_trap_exact(Deoptimization::Reason_intrinsic,
                          Deoptimization::Action_reinterpret);
      return true;
    }

    // result is a boolean (0 or 1); its profile (false_cnt and true_cnt)
    // gives the number of occurrences of each value.
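    // For example, counts == {0, 57} means only 'true' has been observed, so
    // expected_val below becomes 1 and the result can be constant-folded.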
    Node* result = argument(0);
    if (false_cnt == 0 || true_cnt == 0) {
      // According to profile, one value has never been seen.
      int expected_val = (false_cnt == 0) ? 1 : 0;

      Node* cmp = _gvn.transform(new CmpINode(result, intcon(expected_val)));
      Node* test = _gvn.transform(new BoolNode(cmp, BoolTest::eq));

      IfNode* check = create_and_map_if(control(), test, PROB_ALWAYS, COUNT_UNKNOWN);
      Node* fast_path = _gvn.transform(new IfTrueNode(check));
      Node* slow_path = _gvn.transform(new IfFalseNode(check));

      { // Slow path: uncommon trap for never seen value and then reexecute
        // MethodHandleImpl::profileBoolean() to bump the count, so JIT knows
        // the value has been seen at least once.
        PreserveJVMState pjvms(this);
        PreserveReexecuteState preexecs(this);
        jvms()->set_should_reexecute(true);

        set_control(slow_path);
        set_i_o(i_o());

        uncommon_trap_exact(Deoptimization::Reason_intrinsic,
                            Deoptimization::Action_reinterpret);
      }
      // The guard for the never-seen value enables sharpening of the result and
      // returning a constant. It allows branches on the same value to be
      // eliminated later on.
      set_control(fast_path);
      result = intcon(expected_val);
    }
    // Stop profiling.
    // MethodHandleImpl::profileBoolean() has profiling logic in its bytecode.
    // By replacing the method body with profile data (represented as a
    // ProfileBooleanNode on the IR level) we effectively disable profiling.
    // This enables full-speed execution once optimized code is generated.
    Node* profile = _gvn.transform(new ProfileBooleanNode(result, false_cnt, true_cnt));
    C->record_for_igvn(profile);
    set_result(profile);
    return true;
  } else {
    // Continue profiling.
    // Profile data isn't available at the moment, so execute the method's
    // bytecode version. Usually, when GWT LambdaForms are profiled it means
    // that a stand-alone nmethod is compiled and counters aren't available
    // since the corresponding MethodHandle isn't a compile-time constant.
    return false;
  }
}

bool LibraryCallKit::inline_isCompileConstant() {
  Node* n = argument(0);
  set_result(n->is_Con() ? intcon(1) : intcon(0));
  return true;
}

//------------------------------- inline_getObjectSize --------------------------------------
//
// Calculate the runtime size of the object/array.
// native long sun.instrument.InstrumentationImpl.getObjectSize0(long nativeAgent, Object objectToSize);
//
bool LibraryCallKit::inline_getObjectSize() {
  Node* obj = argument(3);
  Node* klass_node = load_object_klass(obj);

  jint  layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  int   layout_is_con = (layout_val == nullptr);

  if (layout_is_con) {
    // Layout helper is constant, can figure out things at compile time.

    if (Klass::layout_helper_is_instance(layout_con)) {
      // Instance case: layout_con contains the size itself.
      Node* size = longcon(Klass::layout_helper_size_in_bytes(layout_con));
      set_result(size);
    } else {
      // Array case: size is round(header + element_size*arraylength).
      // Since arraylength is different for every array instance, we have to
      // compute the whole thing at runtime.
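      // For example, assuming a 16-byte array header and 8-byte object
      // alignment, a long[17] occupies round(16 + 17*8) = 152 bytes.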

      Node* arr_length = load_array_length(obj);

      int round_mask = MinObjAlignmentInBytes - 1;
      int hsize = Klass::layout_helper_header_size(layout_con);
      int eshift = Klass::layout_helper_log2_element_size(layout_con);

      if ((round_mask & ~right_n_bits(eshift)) == 0) {
        round_mask = 0; // strength-reduce it if it goes away completely
      }
      assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
      Node* header_size = intcon(hsize + round_mask);

      Node* lengthx = ConvI2X(arr_length);
      Node* headerx = ConvI2X(header_size);

      Node* abody = lengthx;
      if (eshift != 0) {
        abody = _gvn.transform(new LShiftXNode(lengthx, intcon(eshift)));
      }
      Node* size = _gvn.transform(new AddXNode(headerx, abody));
      if (round_mask != 0) {
        size = _gvn.transform(new AndXNode(size, MakeConX(~round_mask)));
      }
      size = ConvX2L(size);
      set_result(size);
    }
  } else {
    // Layout helper is not constant, need to test for array-ness at runtime.

    enum { _instance_path = 1, _array_path, PATH_LIMIT };
    RegionNode* result_reg = new RegionNode(PATH_LIMIT);
    PhiNode* result_val = new PhiNode(result_reg, TypeLong::LONG);
    record_for_igvn(result_reg);

    Node* array_ctl = generate_array_guard(klass_node, nullptr, &obj);
    if (array_ctl != nullptr) {
      // Array case: size is round(header + element_size*arraylength).
      // Since arraylength is different for every array instance, we have to
      // compute the whole thing at runtime.

      PreserveJVMState pjvms(this);
      set_control(array_ctl);
      Node* arr_length = load_array_length(obj);

      int round_mask = MinObjAlignmentInBytes - 1;
      Node* mask = intcon(round_mask);

      Node* hss = intcon(Klass::_lh_header_size_shift);
      Node* hsm = intcon(Klass::_lh_header_size_mask);
      Node* header_size = _gvn.transform(new URShiftINode(layout_val, hss));
      header_size = _gvn.transform(new AndINode(header_size, hsm));
      header_size = _gvn.transform(new AddINode(header_size, mask));

      // There is no need to mask or shift this value.
      // The semantics of LShiftINode include an implicit mask to 0x1F.
      assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
      Node* elem_shift = layout_val;

      Node* lengthx = ConvI2X(arr_length);
      Node* headerx = ConvI2X(header_size);

      Node* abody = _gvn.transform(new LShiftXNode(lengthx, elem_shift));
      Node* size = _gvn.transform(new AddXNode(headerx, abody));
      if (round_mask != 0) {
        size = _gvn.transform(new AndXNode(size, MakeConX(~round_mask)));
      }
      size = ConvX2L(size);

      result_reg->init_req(_array_path, control());
      result_val->init_req(_array_path, size);
    }

    if (!stopped()) {
      // Instance case: the layout helper gives us instance size almost directly,
      // but we need to mask out the _lh_instance_slow_path_bit.
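      // The slow-path bit sits in the low bits of the size, and instance sizes
      // are already long-aligned, so clearing the low LogBytesPerLong bits
      // drops the bit without changing the size (see the assert below).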
      Node* size = ConvI2X(layout_val);
      assert((int) Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
      Node* mask = MakeConX(~(intptr_t) right_n_bits(LogBytesPerLong));
      size = _gvn.transform(new AndXNode(size, mask));
      size = ConvX2L(size);

      result_reg->init_req(_instance_path, control());
      result_val->init_req(_instance_path, size);
    }

    set_result(result_reg, result_val);
  }

  return true;
}

//------------------------------- inline_blackhole --------------------------------------
//
// Make sure all arguments to this node are alive.
// This matches methods that were requested to be blackholed through compile commands.
//
bool LibraryCallKit::inline_blackhole() {
  assert(callee()->is_static(), "Should have been checked before: only static methods here");
  assert(callee()->is_empty(), "Should have been checked before: only empty methods here");
  assert(callee()->holder()->is_loaded(), "Should have been checked before: only methods for loaded classes here");

  // The Blackhole node pinches only the control, not memory. This allows
  // the blackhole to be pinned in the loop that computes the blackholed
  // values, while having no other side effects, such as breaking
  // optimizations across the blackhole.

  Node* bh = _gvn.transform(new BlackholeNode(control()));
  set_control(_gvn.transform(new ProjNode(bh, TypeFunc::Control)));

  // Bind call arguments as blackhole arguments to keep them alive
  uint nargs = callee()->arg_size();
  for (uint i = 0; i < nargs; i++) {
    bh->add_req(argument(i));
  }

  return true;
}

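//------------------------------unbox_fp16_value--------------------------------------
// Loads the short 'value' field out of a Float16 box, null-checking the box
// first; returns nullptr if the box klass is not Float16 or only the null
// path is live.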
Node* LibraryCallKit::unbox_fp16_value(const TypeInstPtr* float16_box_type, ciField* field, Node* box) {
  const TypeInstPtr* box_type = _gvn.type(box)->isa_instptr();
  if (box_type == nullptr || box_type->instance_klass() != float16_box_type->instance_klass()) {
    return nullptr; // box klass is not Float16
  }

  // Null check; get notnull casted pointer
  Node* null_ctl = top();
  Node* not_null_box = null_check_oop(box, &null_ctl, true);
  // If not_null_box is dead, only the null path is taken
  if (stopped()) {
    set_control(null_ctl);
    return nullptr;
  }
  assert(not_null_box->bottom_type()->is_instptr()->maybe_null() == false, "");
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(not_null_box, field->offset_in_bytes());
  return access_load_at(not_null_box, adr, adr_type, TypeInt::SHORT, T_SHORT, IN_HEAP);
}

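//------------------------------box_fp16_value--------------------------------------
// Allocates a new Float16 box and stores the given short value into its
// 'value' field.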
Node* LibraryCallKit::box_fp16_value(const TypeInstPtr* float16_box_type, ciField* field, Node* value) {
  PreserveReexecuteState preexecs(this);
  jvms()->set_should_reexecute(true);

  const TypeKlassPtr* klass_type = float16_box_type->as_klass_type();
  Node* klass_node = makecon(klass_type);
  Node* box = new_instance(klass_node);

  Node* value_field = basic_plus_adr(box, field->offset_in_bytes());
  const TypePtr* value_adr_type = value_field->bottom_type()->is_ptr();

  Node* field_store = _gvn.transform(access_store_at(box,
                                                     value_field,
                                                     value_adr_type,
                                                     value,
                                                     TypeInt::SHORT,
                                                     T_SHORT,
                                                     IN_HEAP));
  set_memory(field_store, value_adr_type);
  return box;
}

bool LibraryCallKit::inline_fp16_operations(vmIntrinsics::ID id, int num_args) {
  if (!Matcher::match_rule_supported(Op_ReinterpretS2HF) ||
      !Matcher::match_rule_supported(Op_ReinterpretHF2S)) {
    return false;
  }

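  // argument(0) is the Float16 class mirror; it must be a compile-time
  // constant so the box klass and its 'value' field can be resolved here.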
  const TypeInstPtr* box_type = _gvn.type(argument(0))->isa_instptr();
  if (box_type == nullptr || box_type->const_oop() == nullptr) {
    return false;
  }

  ciInstanceKlass* float16_klass = box_type->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
  const TypeInstPtr* float16_box_type = TypeInstPtr::make_exact(TypePtr::NotNull, float16_klass);
  ciField* field = float16_klass->get_field_by_name(ciSymbols::value_name(),
                                                    ciSymbols::short_signature(),
                                                    false);
  assert(field != nullptr, "");

  // Transformed nodes
  Node* fld1 = nullptr;
  Node* fld2 = nullptr;
  Node* fld3 = nullptr;
  switch (num_args) {
  case 3:
    fld3 = unbox_fp16_value(float16_box_type, field, argument(3));
    if (fld3 == nullptr) {
      return false;
    }
    fld3 = _gvn.transform(new ReinterpretS2HFNode(fld3));
    // fall-through
  case 2:
    fld2 = unbox_fp16_value(float16_box_type, field, argument(2));
    if (fld2 == nullptr) {
      return false;
    }
    fld2 = _gvn.transform(new ReinterpretS2HFNode(fld2));
    // fall-through
  case 1:
    fld1 = unbox_fp16_value(float16_box_type, field, argument(1));
    if (fld1 == nullptr) {
      return false;
    }
    fld1 = _gvn.transform(new ReinterpretS2HFNode(fld1));
    break;
  default:
    fatal("Unsupported number of arguments %d", num_args);
  }

  Node* result = nullptr;
  switch (id) {
  // Unary operations
  case vmIntrinsics::_sqrt_float16:
    result = _gvn.transform(new SqrtHFNode(C, control(), fld1));
    break;
  // Ternary operations
  case vmIntrinsics::_fma_float16:
    result = _gvn.transform(new FmaHFNode(fld1, fld2, fld3));
    break;
  default:
    fatal_unexpected_iid(id);
    break;
  }
  result = _gvn.transform(new ReinterpretHF2SNode(result));
  set_result(box_fp16_value(float16_box_type, field, result));
  return true;
}