/*
 * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "classfile/vmIntrinsics.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/countbitsnode.hpp"
#include "opto/idealKit.hpp"
#include "opto/library_call.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "prims/unsafe.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif

//---------------------------make_vm_intrinsic----------------------------
CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  vmIntrinsicID id = m->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (!m->is_loaded()) {
    // Do not attempt to inline unloaded methods.
    return nullptr;
  }

  C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
  bool is_available = false;

  {
    // For calling is_intrinsic_supported, is_intrinsic_disabled and
    // is_disabled_by_flags the compiler must transition to '_thread_in_vm'
    // state because these methods access VM-internal data.
    VM_ENTRY_MARK;
    methodHandle mh(THREAD, m->get_Method());
    is_available = compiler != nullptr && compiler->is_intrinsic_supported(mh, is_virtual) &&
                   !C->directive()->is_intrinsic_disabled(mh) &&
                   !vmIntrinsics::is_disabled_by_flags(mh);
  }

  if (is_available) {
    assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
    assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
    return new LibraryIntrinsic(m, is_virtual,
                                vmIntrinsics::predicates_needed(id),
                                vmIntrinsics::does_virtual_dispatch(id),
                                id);
  } else {
    return nullptr;
  }
}

JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
#ifndef PRODUCT
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci    = kit.bci();
#ifdef ASSERT
  Node* ctrl = kit.control();
#endif
  // Try to inline the intrinsic.
  if (callee->check_intrinsic_candidate() &&
      kit.try_to_inline(_last_predicate)) {
    const char *inline_msg = is_virtual() ? "(intrinsic, virtual)"
                                          : "(intrinsic)";
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, inline_msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, inline_msg);
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    // Push the result from the inlined method onto the stack.
    kit.push_result();
    C->print_inlining_update(this);
    return kit.transfer_exceptions_into_jvms();
  }

  // The intrinsic bailed out
  assert(ctrl == kit.control(), "Control flow was added although the intrinsic bailed out");
  if (jvms->has_method()) {
    // Not a root compile.
    const char* msg;
    if (callee->intrinsic_candidate()) {
      msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
    } else {
      msg = is_virtual() ? "failed to inline (intrinsic, virtual), method not annotated"
                         : "failed to inline (intrinsic), method not annotated";
    }
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, msg);
    }
  } else {
    // Root compile
    ResourceMark rm;
    stringStream msg_stream;
    msg_stream.print("Did not generate intrinsic %s%s at bci:%d in",
                     vmIntrinsics::name_at(intrinsic_id()),
                     is_virtual() ? " (virtual)" : "", bci);
    const char *msg = msg_stream.as_string();
    log_debug(jit, inlining)("%s", msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      tty->print("%s", msg);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  C->print_inlining_update(this);

  return nullptr;
}

Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
  _last_predicate = predicate;
#ifndef PRODUCT
  assert(is_predicated() && predicate < predicates_count(), "sanity");
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Predicate for intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci    = kit.bci();

  Node* slow_ctl = kit.try_to_predicate(predicate);
  if (!kit.failing()) {
    const char *inline_msg = is_virtual() ? "(intrinsic, virtual, predicate)"
                                          : "(intrinsic, predicate)";
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, inline_msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, inline_msg);
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    return slow_ctl; // Could be null if the check folds.
  }

  // The intrinsic bailed out
  if (jvms->has_method()) {
    // Not a root compile.
    const char* msg = "failed to generate predicate for intrinsic";
    CompileTask::print_inlining_ul(kit.callee(), jvms->depth() - 1, bci, msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
    }
  } else {
    // Root compile
    ResourceMark rm;
    stringStream msg_stream;
    msg_stream.print("Did not generate intrinsic %s%s at bci:%d in",
                     vmIntrinsics::name_at(intrinsic_id()),
                     is_virtual() ? " (virtual)" : "", bci);
    const char *msg = msg_stream.as_string();
    log_debug(jit, inlining)("%s", msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining_stream()->print("%s", msg);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  return nullptr;
}

bool LibraryCallKit::try_to_inline(int predicate) {
  // Handle symbolic names for otherwise undistinguished boolean switches:
  const bool is_store       = true;
  const bool is_compress    = true;
  const bool is_static      = true;
  const bool is_volatile    = true;

  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  switch (intrinsic_id()) {
  case vmIntrinsics::_hashCode:                 return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
  case vmIntrinsics::_identityHashCode:         return inline_native_hashcode(/*!virtual*/ false,         is_static);
  case vmIntrinsics::_getClass:                 return inline_native_getClass();

  case vmIntrinsics::_ceil:
  case vmIntrinsics::_floor:
  case vmIntrinsics::_rint:
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_fabs:
  case vmIntrinsics::_iabs:
  case vmIntrinsics::_labs:
  case vmIntrinsics::_datan2:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dpow:
  case vmIntrinsics::_dcopySign:
  case vmIntrinsics::_fcopySign:
  case vmIntrinsics::_dsignum:
  case vmIntrinsics::_fsignum:                  return inline_math_native(intrinsic_id());

  case vmIntrinsics::_min:
  case vmIntrinsics::_max:                      return inline_min_max(intrinsic_id());

  case vmIntrinsics::_notify:
  case vmIntrinsics::_notifyAll:
    return inline_notify(intrinsic_id());

  case vmIntrinsics::_addExactI:                return inline_math_addExactI(false /* add */);
  case vmIntrinsics::_addExactL:                return inline_math_addExactL(false /* add */);
  case vmIntrinsics::_decrementExactI:          return inline_math_subtractExactI(true /* decrement */);
  case vmIntrinsics::_decrementExactL:          return inline_math_subtractExactL(true /* decrement */);
  case vmIntrinsics::_incrementExactI:          return inline_math_addExactI(true /* increment */);
  case vmIntrinsics::_incrementExactL:          return inline_math_addExactL(true /* increment */);
  case vmIntrinsics::_multiplyExactI:           return inline_math_multiplyExactI();
  case vmIntrinsics::_multiplyExactL:           return inline_math_multiplyExactL();
  case vmIntrinsics::_multiplyHigh:             return inline_math_multiplyHigh();
  case vmIntrinsics::_negateExactI:             return inline_math_negateExactI();
  case vmIntrinsics::_negateExactL:             return inline_math_negateExactL();
  case vmIntrinsics::_subtractExactI:           return inline_math_subtractExactI(false /* subtract */);
  case vmIntrinsics::_subtractExactL:           return inline_math_subtractExactL(false /* subtract */);

  case vmIntrinsics::_arraycopy:                return inline_arraycopy();

  case vmIntrinsics::_compareToL:               return inline_string_compareTo(StrIntrinsicNode::LL);
  case vmIntrinsics::_compareToU:               return inline_string_compareTo(StrIntrinsicNode::UU);
  case vmIntrinsics::_compareToLU:              return inline_string_compareTo(StrIntrinsicNode::LU);
  case vmIntrinsics::_compareToUL:              return inline_string_compareTo(StrIntrinsicNode::UL);

  case vmIntrinsics::_indexOfL:                 return inline_string_indexOf(StrIntrinsicNode::LL);
  case vmIntrinsics::_indexOfU:                 return inline_string_indexOf(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfUL:                return inline_string_indexOf(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
  case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
  case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);

  case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
  case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);

  case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
  case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
  case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
  case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);

  case vmIntrinsics::_compressStringC:
  case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
  case vmIntrinsics::_inflateStringC:
  case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);

  case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,  Relaxed, false);
  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,    Relaxed, false);
  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,   Relaxed, false);
  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,    Relaxed, false);
  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,     Relaxed, false);
  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,    Relaxed, false);
  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,   Relaxed, false);
  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,  Relaxed, false);

  case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,  Relaxed, false);
  case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
  case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,    Relaxed, false);
  case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,   Relaxed, false);
  case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,    Relaxed, false);
  case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,     Relaxed, false);
  case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,    Relaxed, false);
  case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,   Relaxed, false);
  case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,  Relaxed, false);

  case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,  Volatile, false);
  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,    Volatile, false);
  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,   Volatile, false);
  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,    Volatile, false);
  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,     Volatile, false);
  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,    Volatile, false);
  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,   Volatile, false);
  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,  Volatile, false);

  case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,  Volatile, false);
  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,    Volatile, false);
  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,   Volatile, false);
  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,    Volatile, false);
  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,     Volatile, false);
  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,    Volatile, false);
  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,   Volatile, false);
  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,  Volatile, false);

  case vmIntrinsics::_getShortUnaligned:        return inline_unsafe_access(!is_store, T_SHORT,   Relaxed, true);
  case vmIntrinsics::_getCharUnaligned:         return inline_unsafe_access(!is_store, T_CHAR,    Relaxed, true);
  case vmIntrinsics::_getIntUnaligned:          return inline_unsafe_access(!is_store, T_INT,     Relaxed, true);
  case vmIntrinsics::_getLongUnaligned:         return inline_unsafe_access(!is_store, T_LONG,    Relaxed, true);

  case vmIntrinsics::_putShortUnaligned:        return inline_unsafe_access( is_store, T_SHORT,   Relaxed, true);
  case vmIntrinsics::_putCharUnaligned:         return inline_unsafe_access( is_store, T_CHAR,    Relaxed, true);
  case vmIntrinsics::_putIntUnaligned:          return inline_unsafe_access( is_store, T_INT,     Relaxed, true);
  case vmIntrinsics::_putLongUnaligned:         return inline_unsafe_access( is_store, T_LONG,    Relaxed, true);

  case vmIntrinsics::_getReferenceAcquire:      return inline_unsafe_access(!is_store, T_OBJECT,  Acquire, false);
  case vmIntrinsics::_getBooleanAcquire:        return inline_unsafe_access(!is_store, T_BOOLEAN, Acquire, false);
  case vmIntrinsics::_getByteAcquire:           return inline_unsafe_access(!is_store, T_BYTE,    Acquire, false);
  case vmIntrinsics::_getShortAcquire:          return inline_unsafe_access(!is_store, T_SHORT,   Acquire, false);
  case vmIntrinsics::_getCharAcquire:           return inline_unsafe_access(!is_store, T_CHAR,    Acquire, false);
  case vmIntrinsics::_getIntAcquire:            return inline_unsafe_access(!is_store, T_INT,     Acquire, false);
  case vmIntrinsics::_getLongAcquire:           return inline_unsafe_access(!is_store, T_LONG,    Acquire, false);
  case vmIntrinsics::_getFloatAcquire:          return inline_unsafe_access(!is_store, T_FLOAT,   Acquire, false);
  case vmIntrinsics::_getDoubleAcquire:         return inline_unsafe_access(!is_store, T_DOUBLE,  Acquire, false);

  case vmIntrinsics::_putReferenceRelease:      return inline_unsafe_access( is_store, T_OBJECT,  Release, false);
  case vmIntrinsics::_putBooleanRelease:        return inline_unsafe_access( is_store, T_BOOLEAN, Release, false);
  case vmIntrinsics::_putByteRelease:           return inline_unsafe_access( is_store, T_BYTE,    Release, false);
  case vmIntrinsics::_putShortRelease:          return inline_unsafe_access( is_store, T_SHORT,   Release, false);
  case vmIntrinsics::_putCharRelease:           return inline_unsafe_access( is_store, T_CHAR,    Release, false);
  case vmIntrinsics::_putIntRelease:            return inline_unsafe_access( is_store, T_INT,     Release, false);
  case vmIntrinsics::_putLongRelease:           return inline_unsafe_access( is_store, T_LONG,    Release, false);
  case vmIntrinsics::_putFloatRelease:          return inline_unsafe_access( is_store, T_FLOAT,   Release, false);
  case vmIntrinsics::_putDoubleRelease:         return inline_unsafe_access( is_store, T_DOUBLE,  Release, false);

  case vmIntrinsics::_getReferenceOpaque:       return inline_unsafe_access(!is_store, T_OBJECT,  Opaque, false);
  case vmIntrinsics::_getBooleanOpaque:         return inline_unsafe_access(!is_store, T_BOOLEAN, Opaque, false);
  case vmIntrinsics::_getByteOpaque:            return inline_unsafe_access(!is_store, T_BYTE,    Opaque, false);
  case vmIntrinsics::_getShortOpaque:           return inline_unsafe_access(!is_store, T_SHORT,   Opaque, false);
  case vmIntrinsics::_getCharOpaque:            return inline_unsafe_access(!is_store, T_CHAR,    Opaque, false);
  case vmIntrinsics::_getIntOpaque:             return inline_unsafe_access(!is_store, T_INT,     Opaque, false);
  case vmIntrinsics::_getLongOpaque:            return inline_unsafe_access(!is_store, T_LONG,    Opaque, false);
  case vmIntrinsics::_getFloatOpaque:           return inline_unsafe_access(!is_store, T_FLOAT,   Opaque, false);
  case vmIntrinsics::_getDoubleOpaque:          return inline_unsafe_access(!is_store, T_DOUBLE,  Opaque, false);

  case vmIntrinsics::_putReferenceOpaque:       return inline_unsafe_access( is_store, T_OBJECT,  Opaque, false);
  case vmIntrinsics::_putBooleanOpaque:         return inline_unsafe_access( is_store, T_BOOLEAN, Opaque, false);
  case vmIntrinsics::_putByteOpaque:            return inline_unsafe_access( is_store, T_BYTE,    Opaque, false);
  case vmIntrinsics::_putShortOpaque:           return inline_unsafe_access( is_store, T_SHORT,   Opaque, false);
  case vmIntrinsics::_putCharOpaque:            return inline_unsafe_access( is_store, T_CHAR,    Opaque, false);
  case vmIntrinsics::_putIntOpaque:             return inline_unsafe_access( is_store, T_INT,     Opaque, false);
  case vmIntrinsics::_putLongOpaque:            return inline_unsafe_access( is_store, T_LONG,    Opaque, false);
  case vmIntrinsics::_putFloatOpaque:           return inline_unsafe_access( is_store, T_FLOAT,   Opaque, false);
  case vmIntrinsics::_putDoubleOpaque:          return inline_unsafe_access( is_store, T_DOUBLE,  Opaque, false);

  case vmIntrinsics::_compareAndSetReference:             return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap,      Volatile);
  case vmIntrinsics::_compareAndSetByte:                  return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap,      Volatile);
  case vmIntrinsics::_compareAndSetShort:                 return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap,      Volatile);
  case vmIntrinsics::_compareAndSetInt:                   return inline_unsafe_load_store(T_INT,    LS_cmp_swap,      Volatile);
  case vmIntrinsics::_compareAndSetLong:                  return inline_unsafe_load_store(T_LONG,   LS_cmp_swap,      Volatile);

  case vmIntrinsics::_weakCompareAndSetReferencePlain:    return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetReferenceAcquire:  return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetReferenceRelease:  return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetReference:         return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetBytePlain:         return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetByteAcquire:       return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetByteRelease:       return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetByte:              return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetShortPlain:        return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetShortAcquire:      return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetShortRelease:      return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetShort:             return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetIntPlain:          return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetIntAcquire:        return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetIntRelease:        return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetInt:               return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetLongPlain:         return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetLongAcquire:       return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetLongRelease:       return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetLong:              return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Volatile);

  case vmIntrinsics::_compareAndExchangeReference:        return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange,  Volatile);
  case vmIntrinsics::_compareAndExchangeReferenceAcquire: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange,  Acquire);
  case vmIntrinsics::_compareAndExchangeReferenceRelease: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange,  Release);
  case vmIntrinsics::_compareAndExchangeByte:             return inline_unsafe_load_store(T_BYTE,   LS_cmp_exchange,  Volatile);
  case vmIntrinsics::_compareAndExchangeByteAcquire:      return inline_unsafe_load_store(T_BYTE,   LS_cmp_exchange,  Acquire);
  case vmIntrinsics::_compareAndExchangeByteRelease:      return inline_unsafe_load_store(T_BYTE,   LS_cmp_exchange,  Release);
  case vmIntrinsics::_compareAndExchangeShort:            return inline_unsafe_load_store(T_SHORT,  LS_cmp_exchange,  Volatile);
  case vmIntrinsics::_compareAndExchangeShortAcquire:     return inline_unsafe_load_store(T_SHORT,  LS_cmp_exchange,  Acquire);
  case vmIntrinsics::_compareAndExchangeShortRelease:     return inline_unsafe_load_store(T_SHORT,  LS_cmp_exchange,  Release);
  case vmIntrinsics::_compareAndExchangeInt:              return inline_unsafe_load_store(T_INT,    LS_cmp_exchange,  Volatile);
  case vmIntrinsics::_compareAndExchangeIntAcquire:       return inline_unsafe_load_store(T_INT,    LS_cmp_exchange,  Acquire);
  case vmIntrinsics::_compareAndExchangeIntRelease:       return inline_unsafe_load_store(T_INT,    LS_cmp_exchange,  Release);
  case vmIntrinsics::_compareAndExchangeLong:             return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Volatile);
  case vmIntrinsics::_compareAndExchangeLongAcquire:      return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Acquire);
  case vmIntrinsics::_compareAndExchangeLongRelease:      return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Release);

  case vmIntrinsics::_getAndAddByte:                      return inline_unsafe_load_store(T_BYTE,   LS_get_add,       Volatile);
  case vmIntrinsics::_getAndAddShort:                     return inline_unsafe_load_store(T_SHORT,  LS_get_add,       Volatile);
  case vmIntrinsics::_getAndAddInt:                       return inline_unsafe_load_store(T_INT,    LS_get_add,       Volatile);
  case vmIntrinsics::_getAndAddLong:                      return inline_unsafe_load_store(T_LONG,   LS_get_add,       Volatile);

  case vmIntrinsics::_getAndSetByte:                      return inline_unsafe_load_store(T_BYTE,   LS_get_set,       Volatile);
  case vmIntrinsics::_getAndSetShort:                     return inline_unsafe_load_store(T_SHORT,  LS_get_set,       Volatile);
  case vmIntrinsics::_getAndSetInt:                       return inline_unsafe_load_store(T_INT,    LS_get_set,       Volatile);
  case vmIntrinsics::_getAndSetLong:                      return inline_unsafe_load_store(T_LONG,   LS_get_set,       Volatile);
  case vmIntrinsics::_getAndSetReference:                 return inline_unsafe_load_store(T_OBJECT, LS_get_set,       Volatile);

  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_storeStoreFence:
  case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());

  case vmIntrinsics::_onSpinWait:               return inline_onspinwait();

  case vmIntrinsics::_currentThread:            return inline_native_currentThread();

#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), "counterTime");
  case vmIntrinsics::_getClassId:               return inline_native_classID();
  case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
#endif
  case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
  case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
  case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
  case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
  case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
  case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
  case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
  case vmIntrinsics::_getLength:                return inline_native_getLength();
  case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
  case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
  case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
  case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
  case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
  case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
  case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());

  case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
  case vmIntrinsics::_newArray:                 return inline_unsafe_newArray(false);

  case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();

  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_getModifiers:
  case vmIntrinsics::_isInterface:
  case vmIntrinsics::_isArray:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_isHidden:
  case vmIntrinsics::_getSuperclass:
  case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());

  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_floatToIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_doubleToLongBits:
  case vmIntrinsics::_longBitsToDouble:         return inline_fp_conversions(intrinsic_id());

  case vmIntrinsics::_numberOfLeadingZeros_i:
  case vmIntrinsics::_numberOfLeadingZeros_l:
  case vmIntrinsics::_numberOfTrailingZeros_i:
  case vmIntrinsics::_numberOfTrailingZeros_l:
  case vmIntrinsics::_bitCount_i:
  case vmIntrinsics::_bitCount_l:
  case vmIntrinsics::_reverseBytes_i:
  case vmIntrinsics::_reverseBytes_l:
  case vmIntrinsics::_reverseBytes_s:
  case vmIntrinsics::_reverseBytes_c:           return inline_number_methods(intrinsic_id());

  case vmIntrinsics::_getCallerClass:           return inline_native_Reflection_getCallerClass();

  case vmIntrinsics::_Reference_get:            return inline_reference_get();
  case vmIntrinsics::_Reference_refersTo0:      return inline_reference_refersTo0(false);
  case vmIntrinsics::_PhantomReference_refersTo0: return inline_reference_refersTo0(true);

  case vmIntrinsics::_Class_cast:               return inline_Class_cast();

  case vmIntrinsics::_aescrypt_encryptBlock:
  case vmIntrinsics::_aescrypt_decryptBlock:    return inline_aescrypt_Block(intrinsic_id());

  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt(intrinsic_id());

  case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
  case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
    return inline_electronicCodeBook_AESCrypt(intrinsic_id());

  case vmIntrinsics::_counterMode_AESCrypt:
    return inline_counterMode_AESCrypt(intrinsic_id());

  case vmIntrinsics::_md5_implCompress:
  case vmIntrinsics::_sha_implCompress:
  case vmIntrinsics::_sha2_implCompress:
  case vmIntrinsics::_sha5_implCompress:
  case vmIntrinsics::_sha3_implCompress:
    return inline_digestBase_implCompress(intrinsic_id());

  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB(predicate);

  case vmIntrinsics::_multiplyToLen:
    return inline_multiplyToLen();

  case vmIntrinsics::_squareToLen:
    return inline_squareToLen();

  case vmIntrinsics::_mulAdd:
    return inline_mulAdd();

  case vmIntrinsics::_montgomeryMultiply:
    return inline_montgomeryMultiply();
  case vmIntrinsics::_montgomerySquare:
    return inline_montgomerySquare();

  case vmIntrinsics::_bigIntegerRightShiftWorker:
    return inline_bigIntegerShift(true);
  case vmIntrinsics::_bigIntegerLeftShiftWorker:
    return inline_bigIntegerShift(false);

  case vmIntrinsics::_vectorizedMismatch:
    return inline_vectorizedMismatch();

  case vmIntrinsics::_ghash_processBlocks:
    return inline_ghash_processBlocks();
  case vmIntrinsics::_base64_encodeBlock:
    return inline_base64_encodeBlock();
  case vmIntrinsics::_base64_decodeBlock:
    return inline_base64_decodeBlock();

  case vmIntrinsics::_encodeISOArray:
  case vmIntrinsics::_encodeByteISOArray:
    return inline_encodeISOArray(false);
  case vmIntrinsics::_encodeAsciiArray:
    return inline_encodeISOArray(true);

  case vmIntrinsics::_updateCRC32:
    return inline_updateCRC32();
  case vmIntrinsics::_updateBytesCRC32:
    return inline_updateBytesCRC32();
  case vmIntrinsics::_updateByteBufferCRC32:
    return inline_updateByteBufferCRC32();

  case vmIntrinsics::_updateBytesCRC32C:
    return inline_updateBytesCRC32C();
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
    return inline_updateDirectByteBufferCRC32C();

  case vmIntrinsics::_updateBytesAdler32:
    return inline_updateBytesAdler32();
  case vmIntrinsics::_updateByteBufferAdler32:
    return inline_updateByteBufferAdler32();

  case vmIntrinsics::_profileBoolean:
    return inline_profileBoolean();
  case vmIntrinsics::_isCompileConstant:
    return inline_isCompileConstant();

  case vmIntrinsics::_hasNegatives:
    return inline_hasNegatives();

  case vmIntrinsics::_fmaD:
  case vmIntrinsics::_fmaF:
    return inline_fma(intrinsic_id());

  case vmIntrinsics::_isDigit:
  case vmIntrinsics::_isLowerCase:
  case vmIntrinsics::_isUpperCase:
  case vmIntrinsics::_isWhitespace:
    return inline_character_compare(intrinsic_id());

  case vmIntrinsics::_maxF:
  case vmIntrinsics::_minF:
  case vmIntrinsics::_maxD:
  case vmIntrinsics::_minD:
    return inline_fp_min_max(intrinsic_id());

  case vmIntrinsics::_VectorUnaryOp:
    return inline_vector_nary_operation(1);
  case vmIntrinsics::_VectorBinaryOp:
    return inline_vector_nary_operation(2);
  case vmIntrinsics::_VectorTernaryOp:
    return inline_vector_nary_operation(3);
  case vmIntrinsics::_VectorBroadcastCoerced:
    return inline_vector_broadcast_coerced();
  case vmIntrinsics::_VectorShuffleIota:
    return inline_vector_shuffle_iota();
  case vmIntrinsics::_VectorMaskOp:
    return inline_vector_mask_operation();
  case vmIntrinsics::_VectorShuffleToVector:
    return inline_vector_shuffle_to_vector();
  case vmIntrinsics::_VectorLoadOp:
    return inline_vector_mem_operation(/*is_store=*/false);
  case vmIntrinsics::_VectorStoreOp:
    return inline_vector_mem_operation(/*is_store=*/true);
  case vmIntrinsics::_VectorGatherOp:
    return inline_vector_gather_scatter(/*is_scatter*/ false);
  case vmIntrinsics::_VectorScatterOp:
    return inline_vector_gather_scatter(/*is_scatter*/ true);
  case vmIntrinsics::_VectorReductionCoerced:
    return inline_vector_reduction();
  case vmIntrinsics::_VectorTest:
    return inline_vector_test();
  case vmIntrinsics::_VectorBlend:
    return inline_vector_blend();
  case vmIntrinsics::_VectorRearrange:
    return inline_vector_rearrange();
  case vmIntrinsics::_VectorCompare:
    return inline_vector_compare();
  case vmIntrinsics::_VectorBroadcastInt:
    return inline_vector_broadcast_int();
  case vmIntrinsics::_VectorConvert:
    return inline_vector_convert();
  case vmIntrinsics::_VectorInsert:
    return inline_vector_insert();
  case vmIntrinsics::_VectorExtract:
    return inline_vector_extract();

  case vmIntrinsics::_getObjectSize:
    return inline_getObjectSize();

  case vmIntrinsics::_blackhole:
    return inline_blackhole();

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmIntrinsics.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), vmIntrinsics::as_int(intrinsic_id()));
    }
#endif
    return false;
  }
}

Node* LibraryCallKit::try_to_predicate(int predicate) {
  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  switch (intrinsic_id()) {
  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(false);
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(true);
  case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
    return inline_electronicCodeBook_AESCrypt_predicate(false);
  case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
    return inline_electronicCodeBook_AESCrypt_predicate(true);
  case vmIntrinsics::_counterMode_AESCrypt:
    return inline_counterMode_AESCrypt_predicate();
  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB_predicate(predicate);

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmIntrinsics.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented predicate for intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), vmIntrinsics::as_int(intrinsic_id()));
    }
#endif
    Node* slow_ctl = control();
    set_control(top()); // No fast path intrinsic
    return slow_ctl;
  }
}

//------------------------------set_result-------------------------------
// Helper function for finishing intrinsics.
void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) {
  record_for_igvn(region);
  set_control(_gvn.transform(region));
  set_result(_gvn.transform(value));
  assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity");
}

//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path. If the test fails
// then a fast path can be taken. (We generally hope it fails.)
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or null if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not null, the slow edge
// is appended to the region.
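//
// A typical use, mirroring generate_negative_guard() below, guards against
// a negative index and then keeps compiling only the non-negative path:
//   Node* cmp_lt = _gvn.transform(new CmpINode(index, intcon(0)));
//   Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
//   Node* is_neg = generate_guard(bol_lt, region, PROB_MIN); // slow edge
// After the call, control() describes only the index >= 0 continuation.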
Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
  if (stopped()) {
    // Already short circuited.
    return nullptr;
  }

  // Build an if node and its projections.
  // If test is true we take the slow path, which we assume is uncommon.
  if (_gvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken. No need to build this guard.
    return nullptr;
  }

  IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);

  Node* if_slow = _gvn.transform(new IfTrueNode(iff));
  if (if_slow == top()) {
    // The slow branch is never taken. No need to build this guard.
    return nullptr;
  }

  if (region != nullptr)
    region->add_req(if_slow);

  Node* if_fast = _gvn.transform(new IfFalseNode(iff));
  set_control(if_fast);

  return if_slow;
}

inline Node* LibraryCallKit::generate_slow_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_UNLIKELY_MAG(3));
}
inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_FAIR);
}

inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
                                                     Node* *pos_index) {
  if (stopped())
    return nullptr;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
    return nullptr;                // index is already adequately typed
  Node* cmp_lt = _gvn.transform(new CmpINode(index, intcon(0)));
  Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
  Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
  if (is_neg != nullptr && pos_index != nullptr) {
    // Emulate effect of Parse::adjust_map_after_if.
    Node* ccast = new CastIINode(index, TypeInt::POS);
    ccast->set_req(0, control());
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_neg;
}

// Make sure that 'position' is a valid limit index, in [0..length].
// There are two equivalent plans for checking this:
//   A. (offset + copyLength)  unsigned<=  arrayLength
//   B. offset  <=  (arrayLength - copyLength)
// We require that all of the values above, except for the sum and
// difference, are already known to be non-negative.
// Plan A is robust in the face of overflow, if offset and copyLength
// are both hugely positive.
//
// Plan B is less direct and intuitive, but it does not overflow at
// all, since the difference of two non-negatives is always
// representable. Whenever Java methods must perform the equivalent
// check they generally use Plan B instead of Plan A.
// For the moment we use Plan A.
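//
// A concrete example of Plan A's robustness: with 32-bit ints, an offset of
// 0x7FFFFFF0 and a copyLength of 0x20 wrap to a negative signed sum, yet
// viewed as an unsigned value the sum is still the true result, 0x80000010.
// Since arrayLength is at most 0x7FFFFFFF, the unsigned comparison below
// correctly rejects the access despite the signed overflow.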
inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
                                                  Node* subseq_length,
                                                  Node* array_length,
                                                  RegionNode* region) {
  if (stopped())
    return nullptr;                // already stopped
  bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && subseq_length->eqv_uncast(array_length))
    return nullptr;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset)                // last += offset
    last = _gvn.transform(new AddINode(last, offset));
  Node* cmp_lt = _gvn.transform(new CmpUNode(array_length, last));
  Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
  Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
  return is_over;
}

// Emit range checks for the given String.value byte array
void LibraryCallKit::generate_string_range_check(Node* array, Node* offset, Node* count, bool char_count) {
  if (stopped()) {
    return; // already stopped
  }
  RegionNode* bailout = new RegionNode(1);
  record_for_igvn(bailout);
  if (char_count) {
    // Convert char count to byte count
    count = _gvn.transform(new LShiftINode(count, intcon(1)));
  }

  // Offset and count must not be negative
  generate_negative_guard(offset, bailout);
  generate_negative_guard(count, bailout);
  // Offset + count must not exceed length of array
  generate_limit_guard(offset, count, load_array_length(array), bailout);

  if (bailout->req() > 1) {
    PreserveJVMState pjvms(this);
    set_control(_gvn.transform(bailout));
    uncommon_trap(Deoptimization::Reason_intrinsic,
                  Deoptimization::Action_maybe_recompile);
  }
}

//--------------------------generate_current_thread--------------------
Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
  ciKlass*    thread_klass = env()->Thread_klass();
  const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
  Node* thread = _gvn.transform(new ThreadLocalNode());
  Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
  tls_output = thread;
  Node* thread_obj_handle = LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(), TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
  thread_obj_handle = _gvn.transform(thread_obj_handle);
  return access_load(thread_obj_handle, thread_type, T_OBJECT, IN_NATIVE | C2_IMMUTABLE_MEMORY);
}


//------------------------------make_string_method_node------------------------
// Helper method for String intrinsic functions. This version is called with
// str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
// characters (depending on the argument encoding 'ae'). cnt1 and cnt2 point
// to Int nodes containing the lengths of str1 and str2.
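//
// Callers such as inline_string_compareTo() below obtain the start addresses
// with array_element_address(arg, intcon(0), T_BYTE) and the counts with
// load_array_length(), then select the node to build via the opcode argument
// (Op_StrComp, Op_StrEquals or Op_StrIndexOf).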
Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
  Node* result = nullptr;
  switch (opcode) {
  case Op_StrIndexOf:
    result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES),
                                str1_start, cnt1, str2_start, cnt2, ae);
    break;
  case Op_StrComp:
    result = new StrCompNode(control(), memory(TypeAryPtr::BYTES),
                             str1_start, cnt1, str2_start, cnt2, ae);
    break;
  case Op_StrEquals:
    // We already know that cnt1 == cnt2 here (checked in 'inline_string_equals').
    // Use the constant length if there is one because an optimized match rule may exist.
    result = new StrEqualsNode(control(), memory(TypeAryPtr::BYTES),
                               str1_start, str2_start, cnt2->is_Con() ? cnt2 : cnt1, ae);
    break;
  default:
    ShouldNotReachHere();
    return nullptr;
  }

  // All these intrinsics have checks.
  C->set_has_split_ifs(true); // Has chance for split-if optimization
  clear_upper_avx();

  return _gvn.transform(result);
}

//------------------------------inline_string_compareTo------------------------
bool LibraryCallKit::inline_string_compareTo(StrIntrinsicNode::ArgEnc ae) {
  Node* arg1 = argument(0);
  Node* arg2 = argument(1);

  arg1 = must_be_not_null(arg1, true);
  arg2 = must_be_not_null(arg2, true);

  // Get start addr and length of first argument
  Node* arg1_start = array_element_address(arg1, intcon(0), T_BYTE);
  Node* arg1_cnt   = load_array_length(arg1);

  // Get start addr and length of second argument
  Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
  Node* arg2_cnt   = load_array_length(arg2);

  Node* result = make_string_method_node(Op_StrComp, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
  set_result(result);
  return true;
}

//------------------------------inline_string_equals------------------------
bool LibraryCallKit::inline_string_equals(StrIntrinsicNode::ArgEnc ae) {
  Node* arg1 = argument(0);
  Node* arg2 = argument(1);

  // paths (plus control) merge
  RegionNode* region = new RegionNode(3);
  Node* phi = new PhiNode(region, TypeInt::BOOL);

  if (!stopped()) {

    arg1 = must_be_not_null(arg1, true);
    arg2 = must_be_not_null(arg2, true);

    // Get start addr and length of first argument
    Node* arg1_start = array_element_address(arg1, intcon(0), T_BYTE);
    Node* arg1_cnt   = load_array_length(arg1);

    // Get start addr and length of second argument
    Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
    Node* arg2_cnt   = load_array_length(arg2);

    // Check for arg1_cnt != arg2_cnt
    Node* cmp = _gvn.transform(new CmpINode(arg1_cnt, arg2_cnt));
    Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
    Node* if_ne = generate_slow_guard(bol, nullptr);
    if (if_ne != nullptr) {
      phi->init_req(2, intcon(0));
      region->init_req(2, if_ne);
    }

    // Check for count == 0 is done by assembler code for StrEquals.

    if (!stopped()) {
      Node* equals = make_string_method_node(Op_StrEquals, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
      phi->init_req(1, equals);
      region->init_req(1, control());
    }
  }

  // post merge
  set_control(_gvn.transform(region));
  record_for_igvn(region);

  set_result(_gvn.transform(phi));
  return true;
}

//------------------------------inline_array_equals----------------------------
bool LibraryCallKit::inline_array_equals(StrIntrinsicNode::ArgEnc ae) {
  assert(ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::LL, "unsupported array types");
  Node* arg1 = argument(0);
  Node* arg2 = argument(1);

  const TypeAryPtr* mtype = (ae == StrIntrinsicNode::UU) ? TypeAryPtr::CHARS : TypeAryPtr::BYTES;
  set_result(_gvn.transform(new AryEqNode(control(), memory(mtype), arg1, arg2, ae)));
  clear_upper_avx();

  return true;
}

//------------------------------inline_hasNegatives------------------------------
bool LibraryCallKit::inline_hasNegatives() {
  if (too_many_traps(Deoptimization::Reason_intrinsic)) {
    return false;
  }

  assert(callee()->signature()->size() == 3, "hasNegatives has 3 parameters");
  // no receiver since it is a static method
  Node* ba     = argument(0);
  Node* offset = argument(1);
  Node* len    = argument(2);

  ba = must_be_not_null(ba, true);

  // Range checks
  generate_string_range_check(ba, offset, len, false);
  if (stopped()) {
    return true;
  }
  Node* ba_start = array_element_address(ba, offset, T_BYTE);
  Node* result = new HasNegativesNode(control(), memory(TypeAryPtr::BYTES), ba_start, len);
  set_result(_gvn.transform(result));
  return true;
}

bool LibraryCallKit::inline_preconditions_checkIndex(BasicType bt) {
  Node* index = argument(0);
  Node* length = bt == T_INT ? argument(1) : argument(2);
  if (too_many_traps(Deoptimization::Reason_intrinsic) || too_many_traps(Deoptimization::Reason_range_check)) {
    return false;
  }

  // check that length is positive
  Node* len_pos_cmp = _gvn.transform(CmpNode::make(length, integercon(0, bt), bt));
  Node* len_pos_bol = _gvn.transform(new BoolNode(len_pos_cmp, BoolTest::ge));

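  // (the BuildCutout scope below places the uncommon trap on the failing
  // projection of this test and, once the scope exits, leaves control on
  // the passing projection)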
  {
    BuildCutout unless(this, len_pos_bol, PROB_MAX);
    uncommon_trap(Deoptimization::Reason_intrinsic,
                  Deoptimization::Action_make_not_entrant);
  }

  if (stopped()) {
    // Length is known to be always negative during compilation, and the IR graph constructed so far is good, so return success
    return true;
  }

  // length is now known to be positive; add a cast node to make this explicit
  jlong upper_bound = _gvn.type(length)->is_integer(bt)->hi_as_long();
  Node* casted_length = ConstraintCastNode::make(control(), length, TypeInteger::make(0, upper_bound, Type::WidenMax, bt), bt);
  casted_length = _gvn.transform(casted_length);
  replace_in_map(length, casted_length);
  length = casted_length;

  // Use an unsigned comparison for the range check itself
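  // (a single unsigned 'index u< length' covers both bounds: a negative
  // index reinterpreted as unsigned exceeds any non-negative length, so it
  // fails the same test as an index that is too large)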
  Node* rc_cmp = _gvn.transform(CmpNode::make(index, length, bt, true));
  BoolTest::mask btest = BoolTest::lt;
  Node* rc_bool = _gvn.transform(new BoolNode(rc_cmp, btest));
  RangeCheckNode* rc = new RangeCheckNode(control(), rc_bool, PROB_MAX, COUNT_UNKNOWN);
  _gvn.set_type(rc, rc->Value(&_gvn));
  if (!rc_bool->is_Con()) {
    record_for_igvn(rc);
  }
  set_control(_gvn.transform(new IfTrueNode(rc)));
  {
    PreserveJVMState pjvms(this);
    set_control(_gvn.transform(new IfFalseNode(rc)));
    uncommon_trap(Deoptimization::Reason_range_check,
                  Deoptimization::Action_make_not_entrant);
  }

  if (stopped()) {
    // Range check is known to always fail during compilation, and the IR graph constructed so far is good, so return success
    return true;
  }

  // index is now known to be >= 0 and < length, cast it
  Node* result = ConstraintCastNode::make(control(), index, TypeInteger::make(0, upper_bound, Type::WidenMax, bt), bt);
  result = _gvn.transform(result);
  set_result(result);
  replace_in_map(index, result);
  return true;
}

//------------------------------inline_string_indexOf------------------------
bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) {
  if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
    return false;
  }
  Node* src = argument(0);
  Node* tgt = argument(1);

  // Make the merge point
  RegionNode* result_rgn = new RegionNode(4);
  Node* result_phi = new PhiNode(result_rgn, TypeInt::INT);

  src = must_be_not_null(src, true);
  tgt = must_be_not_null(tgt, true);

  // Get start addr and length of source string
  Node* src_start = array_element_address(src, intcon(0), T_BYTE);
  Node* src_count = load_array_length(src);

  // Get start addr and length of substring
  Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
  Node* tgt_count = load_array_length(tgt);

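  // Note: load_array_length() above returns the length of the backing
  // byte[] arrays; StrIndexOf works in characters, so for UTF16-encoded
  // data the byte counts are halved below.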
  if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
    // Divide src size by 2 if String is UTF16 encoded
    src_count = _gvn.transform(new RShiftINode(src_count, intcon(1)));
  }
  if (ae == StrIntrinsicNode::UU) {
    // Divide substring size by 2 if String is UTF16 encoded
    tgt_count = _gvn.transform(new RShiftINode(tgt_count, intcon(1)));
  }

  Node* result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count, result_rgn, result_phi, ae);
  if (result != nullptr) {
    result_phi->init_req(3, result);
    result_rgn->init_req(3, control());
  }
  set_control(_gvn.transform(result_rgn));
  record_for_igvn(result_rgn);
  set_result(_gvn.transform(result_phi));

  return true;
}

//-----------------------------inline_string_indexOfI-----------------------
bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) {
  if (too_many_traps(Deoptimization::Reason_intrinsic)) {
    return false;
  }
  if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
    return false;
  }
  assert(callee()->signature()->size() == 5, "String.indexOf() has 5 arguments");
  Node* src        = argument(0); // byte[]
  Node* src_count  = argument(1); // char count
  Node* tgt        = argument(2); // byte[]
  Node* tgt_count  = argument(3); // char count
  Node* from_index = argument(4); // char index

  src = must_be_not_null(src, true);
  tgt = must_be_not_null(tgt, true);

  // Multiply byte array index by 2 if String is UTF16 encoded
  Node* src_offset = (ae == StrIntrinsicNode::LL) ? from_index : _gvn.transform(new LShiftINode(from_index, intcon(1)));
  src_count = _gvn.transform(new SubINode(src_count, from_index));
  Node* src_start = array_element_address(src, src_offset, T_BYTE);
  Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);

  // Range checks
  generate_string_range_check(src, src_offset, src_count, ae != StrIntrinsicNode::LL);
  generate_string_range_check(tgt, intcon(0), tgt_count, ae == StrIntrinsicNode::UU);
  if (stopped()) {
    return true;
  }

  RegionNode* region = new RegionNode(5);
  Node* phi = new PhiNode(region, TypeInt::INT);

  Node* result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count, region, phi, ae);
  if (result != nullptr) {
    // The result is index relative to from_index if substring was found, -1 otherwise.
    // Generate code which will fold into cmove.
    Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
    Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));

    Node* if_lt = generate_slow_guard(bol, nullptr);
    if (if_lt != nullptr) {
      // result == -1
      phi->init_req(3, result);
      region->init_req(3, if_lt);
    }
    if (!stopped()) {
      result = _gvn.transform(new AddINode(result, from_index));
      phi->init_req(4, result);
      region->init_req(4, control());
    }
  }

  set_control(_gvn.transform(region));
  record_for_igvn(region);
  set_result(_gvn.transform(phi));
  clear_upper_avx();

  return true;
}

// Create StrIndexOfNode with fast path checks
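// Two trivial cases are decided up front, before the StrIndexOfNode is
// built: a target longer than the source can never match (result -1), and
// an empty target matches immediately at the starting position (result 0).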
1187 Node* LibraryCallKit::make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count,
1188 RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae) {
1189 // Check for substr count > string count
1190 Node* cmp = _gvn.transform(new CmpINode(tgt_count, src_count));
1191 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::gt));
1192 Node* if_gt = generate_slow_guard(bol, nullptr);
1193 if (if_gt != nullptr) {
1194 phi->init_req(1, intcon(-1));
1195 region->init_req(1, if_gt);
1196 }
1197 if (!stopped()) {
1198 // Check for substr count == 0
1199 cmp = _gvn.transform(new CmpINode(tgt_count, intcon(0)));
1200 bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
1201 Node* if_zero = generate_slow_guard(bol, nullptr);
1202 if (if_zero != nullptr) {
1203 phi->init_req(2, intcon(0));
1204 region->init_req(2, if_zero);
1205 }
1206 }
1207 if (!stopped()) {
1208 return make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
1209 }
1210 return nullptr;
1211 }
1212
1213 //-----------------------------inline_string_indexOfChar-----------------------
1214 bool LibraryCallKit::inline_string_indexOfChar(StrIntrinsicNode::ArgEnc ae) {
1215 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1216 return false;
1217 }
1218 if (!Matcher::match_rule_supported(Op_StrIndexOfChar)) {
1219 return false;
1220 }
1221 assert(callee()->signature()->size() == 4, "String.indexOfChar() has 4 arguments");
1222 Node* src = argument(0); // byte[]
1223 Node* int_ch = argument(1);
1224 Node* from_index = argument(2);
1225 Node* max = argument(3);
1226
1227 src = must_be_not_null(src, true);
1228
1229 Node* src_offset = ae == StrIntrinsicNode::L ? from_index : _gvn.transform(new LShiftINode(from_index, intcon(1)));
1230 Node* src_start = array_element_address(src, src_offset, T_BYTE);
1231 Node* src_count = _gvn.transform(new SubINode(max, from_index));
1232
1233 // Range checks
1234 generate_string_range_check(src, src_offset, src_count, ae == StrIntrinsicNode::U);
1235
1236 // Check for int_ch >= 0
1237 Node* int_ch_cmp = _gvn.transform(new CmpINode(int_ch, intcon(0)));
1238 Node* int_ch_bol = _gvn.transform(new BoolNode(int_ch_cmp, BoolTest::ge));
1239 {
1240 BuildCutout unless(this, int_ch_bol, PROB_MAX);
1241 uncommon_trap(Deoptimization::Reason_intrinsic,
1242 Deoptimization::Action_maybe_recompile);
1243 }
1244 if (stopped()) {
1245 return true;
1246 }
1247
1248 RegionNode* region = new RegionNode(3);
1249 Node* phi = new PhiNode(region, TypeInt::INT);
1250
1251 Node* result = new StrIndexOfCharNode(control(), memory(TypeAryPtr::BYTES), src_start, src_count, int_ch, ae);
1252 C->set_has_split_ifs(true); // Has chance for split-if optimization
1253 _gvn.transform(result);
1254
1255 Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
1256 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
1257
1258 Node* if_lt = generate_slow_guard(bol, nullptr);
1259 if (if_lt != nullptr) {
1260 // result == -1
1261 phi->init_req(2, result);
1262 region->init_req(2, if_lt);
1263 }
1264 if (!stopped()) {
1265 result = _gvn.transform(new AddINode(result, from_index));
1266 phi->init_req(1, result);
1267 region->init_req(1, control());
1268 }
1269 set_control(_gvn.transform(region));
1270 record_for_igvn(region);
1271 set_result(_gvn.transform(phi));
1272
1273 return true;
1274 }

//---------------------------inline_string_copy---------------------
1276 // compressIt == true --> generate a compressed copy operation (compress char[]/byte[] to byte[])
1277 // int StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
1278 // int StringUTF16.compress(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
1279 // compressIt == false --> generate an inflated copy operation (inflate byte[] to char[]/byte[])
1280 // void StringLatin1.inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len)
1281 // void StringLatin1.inflate(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
1282 bool LibraryCallKit::inline_string_copy(bool compress) {
1283 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1284 return false;
1285 }
1286 int nargs = 5; // 2 oops, 3 ints
1287 assert(callee()->signature()->size() == nargs, "string copy has 5 arguments");
1288
1289 Node* src = argument(0);
1290 Node* src_offset = argument(1);
1291 Node* dst = argument(2);
1292 Node* dst_offset = argument(3);
1293 Node* length = argument(4);
1294
1295 // Check for allocation before we add nodes that would confuse
1296 // tightly_coupled_allocation()
1297 AllocateArrayNode* alloc = tightly_coupled_allocation(dst);
1298
1299 // Figure out the size and type of the elements we will be copying.
1300 const TypeAryPtr* src_type = src->Value(&_gvn)->isa_aryptr();
1301 const TypeAryPtr* dst_type = dst->Value(&_gvn)->isa_aryptr();
1302 if (src_type == nullptr || dst_type == nullptr) {
1303 return false;
1304 }
1305 BasicType src_elem = src_type->klass()->as_array_klass()->element_type()->basic_type();
1306 BasicType dst_elem = dst_type->klass()->as_array_klass()->element_type()->basic_type();
1307 assert((compress && dst_elem == T_BYTE && (src_elem == T_BYTE || src_elem == T_CHAR)) ||
1308 (!compress && src_elem == T_BYTE && (dst_elem == T_BYTE || dst_elem == T_CHAR)),
1309 "Unsupported array types for inline_string_copy");
1310
1311 src = must_be_not_null(src, true);
1312 dst = must_be_not_null(dst, true);
1313
1314 // Convert char[] offsets to byte[] offsets
1315 bool convert_src = (compress && src_elem == T_BYTE);
1316 bool convert_dst = (!compress && dst_elem == T_BYTE);
1317 if (convert_src) {
1318 src_offset = _gvn.transform(new LShiftINode(src_offset, intcon(1)));
1319 } else if (convert_dst) {
1320 dst_offset = _gvn.transform(new LShiftINode(dst_offset, intcon(1)));
1321 }
1322
1323 // Range checks
1324 generate_string_range_check(src, src_offset, length, convert_src);
1325 generate_string_range_check(dst, dst_offset, length, convert_dst);
1326 if (stopped()) {
1327 return true;
1328 }
1329
1330 Node* src_start = array_element_address(src, src_offset, src_elem);
1331 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
1332 // 'src_start' points to src array + scaled offset
1333 // 'dst_start' points to dst array + scaled offset
1334 Node* count = nullptr;
1335 if (compress) {
1336 count = compress_string(src_start, TypeAryPtr::get_array_body_type(src_elem), dst_start, length);
1337 } else {
1338 inflate_string(src_start, dst_start, TypeAryPtr::get_array_body_type(dst_elem), length);
1339 }
1340
1341 if (alloc != nullptr) {
1342 if (alloc->maybe_set_complete(&_gvn)) {
1343 // "You break it, you buy it."
1344 InitializeNode* init = alloc->initialization();
1345 assert(init->is_complete(), "we just did this");
1346 init->set_complete_with_arraycopy();
1347 assert(dst->is_CheckCastPP(), "sanity");
1348 assert(dst->in(0)->in(0) == init, "dest pinned");
1349 }
1350 // Do not let stores that initialize this object be reordered with
1351 // a subsequent store that would make this object accessible by
1352 // other threads.
1353 // Record what AllocateNode this StoreStore protects so that
1354 // escape analysis can go from the MemBarStoreStoreNode to the
1355 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1356 // based on the escape status of the AllocateNode.
1357 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1358 }
1359 if (compress) {
1360 set_result(_gvn.transform(count));
1361 }
1362 clear_upper_avx();
1363
1364 return true;
1365 }
1366
1367 #ifdef _LP64
1368 #define XTOP ,top() /*additional argument*/
1369 #else //_LP64
1370 #define XTOP /*no additional argument*/
1371 #endif //_LP64
1372
1373 //------------------------inline_string_toBytesU--------------------------
1374 // public static byte[] StringUTF16.toBytes(char[] value, int off, int len)
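// Conceptually (a sketch of the intent, not the Java source):
//   byte[] buf = new byte[len << 1];   // 2 bytes per char; length overflow is guarded
//   copy 'len' chars from value[off..] into buf via the arraycopy stub;
//   return buf;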
1375 bool LibraryCallKit::inline_string_toBytesU() {
1376 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1377 return false;
1378 }
1379 // Get the arguments.
1380 Node* value = argument(0);
1381 Node* offset = argument(1);
1382 Node* length = argument(2);
1383
1384 Node* newcopy = nullptr;
1385
1386 // Set the original stack and the reexecute bit for the interpreter to reexecute
1387 // the bytecode that invokes StringUTF16.toBytes() if deoptimization happens.
1388 { PreserveReexecuteState preexecs(this);
1389 jvms()->set_should_reexecute(true);
1390
1391 // Check if a null path was taken unconditionally.
1392 value = null_check(value);
1393
1394 RegionNode* bailout = new RegionNode(1);
1395 record_for_igvn(bailout);
1396
1397 // Range checks
1398 generate_negative_guard(offset, bailout);
1399 generate_negative_guard(length, bailout);
1400 generate_limit_guard(offset, length, load_array_length(value), bailout);
1401 // Make sure that resulting byte[] length does not overflow Integer.MAX_VALUE
1402 generate_limit_guard(length, intcon(0), intcon(max_jint/2), bailout);
1403
1404 if (bailout->req() > 1) {
1405 PreserveJVMState pjvms(this);
1406 set_control(_gvn.transform(bailout));
1407 uncommon_trap(Deoptimization::Reason_intrinsic,
1408 Deoptimization::Action_maybe_recompile);
1409 }
1410 if (stopped()) {
1411 return true;
1412 }
1413
1414 Node* size = _gvn.transform(new LShiftINode(length, intcon(1)));
1415 Node* klass_node = makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_BYTE)));
1416 newcopy = new_array(klass_node, size, 0); // no arguments to push
1417 AllocateArrayNode* alloc = tightly_coupled_allocation(newcopy);
1418 guarantee(alloc != nullptr, "created above");
1419
1420 // Calculate starting addresses.
1421 Node* src_start = array_element_address(value, offset, T_CHAR);
1422 Node* dst_start = basic_plus_adr(newcopy, arrayOopDesc::base_offset_in_bytes(T_BYTE));
1423
1424 // Check if src array address is aligned to HeapWordSize (dst is always aligned)
1425 const TypeInt* toffset = gvn().type(offset)->is_int();
1426 bool aligned = toffset->is_con() && ((toffset->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
1427
1428 // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
1429 const char* copyfunc_name = "arraycopy";
1430 address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
1431 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
1432 OptoRuntime::fast_arraycopy_Type(),
1433 copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
1434 src_start, dst_start, ConvI2X(length) XTOP);
1435 // Do not let reads from the cloned object float above the arraycopy.
1436 if (alloc->maybe_set_complete(&_gvn)) {
1437 // "You break it, you buy it."
1438 InitializeNode* init = alloc->initialization();
1439 assert(init->is_complete(), "we just did this");
1440 init->set_complete_with_arraycopy();
1441 assert(newcopy->is_CheckCastPP(), "sanity");
1442 assert(newcopy->in(0)->in(0) == init, "dest pinned");
1443 }
1444 // Do not let stores that initialize this object be reordered with
1445 // a subsequent store that would make this object accessible by
1446 // other threads.
1447 // Record what AllocateNode this StoreStore protects so that
1448 // escape analysis can go from the MemBarStoreStoreNode to the
1449 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1450 // based on the escape status of the AllocateNode.
1451 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1452 } // original reexecute is set back here
1453
1454 C->set_has_split_ifs(true); // Has chance for split-if optimization
1455 if (!stopped()) {
1456 set_result(newcopy);
1457 }
1458 clear_upper_avx();
1459
1460 return true;
1461 }
1462
1463 //------------------------inline_string_getCharsU--------------------------
1464 // public void StringUTF16.getChars(byte[] src, int srcBegin, int srcEnd, char dst[], int dstBegin)
1465 bool LibraryCallKit::inline_string_getCharsU() {
1466 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1467 return false;
1468 }
1469
1470 // Get the arguments.
1471 Node* src = argument(0);
1472 Node* src_begin = argument(1);
1473 Node* src_end = argument(2); // exclusive offset (i < src_end)
1474 Node* dst = argument(3);
1475 Node* dst_begin = argument(4);
1476
1477 // Check for allocation before we add nodes that would confuse
1478 // tightly_coupled_allocation()
1479 AllocateArrayNode* alloc = tightly_coupled_allocation(dst);
1480
1481 // Check if a null path was taken unconditionally.
1482 src = null_check(src);
1483 dst = null_check(dst);
1484 if (stopped()) {
1485 return true;
1486 }
1487
1488 // Get length and convert char[] offset to byte[] offset
1489 Node* length = _gvn.transform(new SubINode(src_end, src_begin));
1490 src_begin = _gvn.transform(new LShiftINode(src_begin, intcon(1)));
1491
1492 // Range checks
1493 generate_string_range_check(src, src_begin, length, true);
1494 generate_string_range_check(dst, dst_begin, length, false);
1495 if (stopped()) {
1496 return true;
1497 }
1498
1499 if (!stopped()) {
1500 // Calculate starting addresses.
1501 Node* src_start = array_element_address(src, src_begin, T_BYTE);
1502 Node* dst_start = array_element_address(dst, dst_begin, T_CHAR);
1503
1504 // Check if array addresses are aligned to HeapWordSize
1505 const TypeInt* tsrc = gvn().type(src_begin)->is_int();
1506 const TypeInt* tdst = gvn().type(dst_begin)->is_int();
1507 bool aligned = tsrc->is_con() && ((tsrc->get_con() * type2aelembytes(T_BYTE)) % HeapWordSize == 0) &&
1508 tdst->is_con() && ((tdst->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
1509
1510 // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
1511 const char* copyfunc_name = "arraycopy";
1512 address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
1513 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
1514 OptoRuntime::fast_arraycopy_Type(),
1515 copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
1516 src_start, dst_start, ConvI2X(length) XTOP);
1517 // Do not let reads from the cloned object float above the arraycopy.
1518 if (alloc != nullptr) {
1519 if (alloc->maybe_set_complete(&_gvn)) {
1520 // "You break it, you buy it."
1521 InitializeNode* init = alloc->initialization();
1522 assert(init->is_complete(), "we just did this");
1523 init->set_complete_with_arraycopy();
1524 assert(dst->is_CheckCastPP(), "sanity");
1525 assert(dst->in(0)->in(0) == init, "dest pinned");
1526 }
1527 // Do not let stores that initialize this object be reordered with
1528 // a subsequent store that would make this object accessible by
1529 // other threads.
1530 // Record what AllocateNode this StoreStore protects so that
1531 // escape analysis can go from the MemBarStoreStoreNode to the
1532 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1533 // based on the escape status of the AllocateNode.
1534 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1535 } else {
1536 insert_mem_bar(Op_MemBarCPUOrder);
1537 }
1538 }
1539
1540 C->set_has_split_ifs(true); // Has chance for split-if optimization
1541 return true;
1542 }
1543
1544 //----------------------inline_string_char_access----------------------------
1545 // Store/Load char to/from byte[] array.
1546 // static void StringUTF16.putChar(byte[] val, int index, int c)
1547 // static char StringUTF16.getChar(byte[] val, int index)
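// Both accessors address the byte[] as if it were a char[]: getChar reads the
// two bytes at val[index << 1] as one char, and putChar stores one char there.
// Because the access type (char) disagrees with the array type (byte[]), the
// access is emitted with the C2_MISMATCHED decorator below.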
1548 bool LibraryCallKit::inline_string_char_access(bool is_store) {
1549 Node* value = argument(0);
1550 Node* index = argument(1);
1551 Node* ch = is_store ? argument(2) : nullptr;
1552
1553 // This intrinsic accesses byte[] array as char[] array. Computing the offsets
1554 // correctly requires matched array shapes.
1555 assert (arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE),
1556 "sanity: byte[] and char[] bases agree");
1557 assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2,
1558 "sanity: byte[] and char[] scales agree");
1559
  // Bail out when getChar over constants is requested: constant folding
  // refuses to fold a mismatched char access over byte[], whereas normal
  // inlining of the getChar Java method constant folds nicely instead.
1563 if (!is_store && value->is_Con() && index->is_Con()) {
1564 return false;
1565 }
1566
1567 // Save state and restore on bailout
1568 uint old_sp = sp();
1569 SafePointNode* old_map = clone_map();
1570
1571 value = must_be_not_null(value, true);
1572
1573 Node* adr = array_element_address(value, index, T_CHAR);
1574 if (adr->is_top()) {
1575 set_map(old_map);
1576 set_sp(old_sp);
1577 return false;
1578 }
1579 destruct_map_clone(old_map);
1580 if (is_store) {
1581 access_store_at(value, adr, TypeAryPtr::BYTES, ch, TypeInt::CHAR, T_CHAR, IN_HEAP | MO_UNORDERED | C2_MISMATCHED);
1582 } else {
1583 ch = access_load_at(value, adr, TypeAryPtr::BYTES, TypeInt::CHAR, T_CHAR, IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD | C2_UNKNOWN_CONTROL_LOAD);
1584 set_result(ch);
1585 }
1586 return true;
1587 }
1588
1589 //--------------------------round_double_node--------------------------------
1590 // Round a double node if necessary.
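// On IA32 with UseSSE < 2, doubles are evaluated in the x87 FPU's extended
// precision, so strict-fp semantics need an explicit rounding back to 64 bits;
// the Unimplemented() below documents that no other platform is expected to
// set strict_fp_requires_explicit_rounding.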
1591 Node* LibraryCallKit::round_double_node(Node* n) {
1592 if (Matcher::strict_fp_requires_explicit_rounding) {
1593 #ifdef IA32
1594 if (UseSSE < 2) {
1595 n = _gvn.transform(new RoundDoubleNode(nullptr, n));
1596 }
1597 #else
1598 Unimplemented();
1599 #endif // IA32
1600 }
1601 return n;
1602 }
1603
1604 //------------------------------inline_math-----------------------------------
1605 // public static double Math.abs(double)
1606 // public static double Math.sqrt(double)
1607 // public static double Math.log(double)
1608 // public static double Math.log10(double)
1609 bool LibraryCallKit::inline_double_math(vmIntrinsics::ID id) {
1610 Node* arg = round_double_node(argument(0));
1611 Node* n = nullptr;
1612 switch (id) {
1613 case vmIntrinsics::_dabs: n = new AbsDNode( arg); break;
1614 case vmIntrinsics::_dsqrt: n = new SqrtDNode(C, control(), arg); break;
1615 case vmIntrinsics::_ceil: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_ceil); break;
1616 case vmIntrinsics::_floor: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_floor); break;
1617 case vmIntrinsics::_rint: n = RoundDoubleModeNode::make(_gvn, arg, RoundDoubleModeNode::rmode_rint); break;
1618 case vmIntrinsics::_dcopySign: n = CopySignDNode::make(_gvn, arg, round_double_node(argument(2))); break;
1619 case vmIntrinsics::_dsignum: n = SignumDNode::make(_gvn, arg); break;
1620 default: fatal_unexpected_iid(id); break;
1621 }
1622 set_result(_gvn.transform(n));
1623 return true;
1624 }
1625
1626 //------------------------------inline_math-----------------------------------
1627 // public static float Math.abs(float)
1628 // public static int Math.abs(int)
1629 // public static long Math.abs(long)
1630 bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
1631 Node* arg = argument(0);
1632 Node* n = nullptr;
1633 switch (id) {
1634 case vmIntrinsics::_fabs: n = new AbsFNode( arg); break;
1635 case vmIntrinsics::_iabs: n = new AbsINode( arg); break;
1636 case vmIntrinsics::_labs: n = new AbsLNode( arg); break;
1637 case vmIntrinsics::_fcopySign: n = new CopySignFNode(arg, argument(1)); break;
1638 case vmIntrinsics::_fsignum: n = SignumFNode::make(_gvn, arg); break;
1639 default: fatal_unexpected_iid(id); break;
1640 }
1641 set_result(_gvn.transform(n));
1642 return true;
1643 }
1644
1645 //------------------------------runtime_math-----------------------------
1646 bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
1647 assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
1648 "must be (DD)D or (D)D type");
1649
1650 // Inputs
1651 Node* a = round_double_node(argument(0));
1652 Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? round_double_node(argument(2)) : nullptr;
1653
1654 const TypePtr* no_memory_effects = nullptr;
1655 Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
1656 no_memory_effects,
1657 a, top(), b, b ? top() : nullptr);
1658 Node* value = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+0));
1659 #ifdef ASSERT
1660 Node* value_top = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+1));
1661 assert(value_top == top(), "second value must be top");
1662 #endif
1663
1664 set_result(value);
1665 return true;
1666 }
1667
1668 //------------------------------inline_math_pow-----------------------------
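// Strength-reduce Math.pow for two constant exponents (a sketch of the
// transformation, not the full Math.pow spec):
//   pow(x, 2.0) => x * x
//   pow(x, 0.5) => x > 0.0 ? sqrt(x) : <POW runtime call>
// The guard on the second form keeps -0.0 and negative bases, where pow and
// sqrt disagree (e.g. pow(-0.0, 0.5) == +0.0 but sqrt(-0.0) == -0.0), on the
// runtime-call path.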
1669 bool LibraryCallKit::inline_math_pow() {
1670 Node* exp = round_double_node(argument(2));
1671 const TypeD* d = _gvn.type(exp)->isa_double_constant();
1672 if (d != nullptr) {
1673 if (d->getd() == 2.0) {
1674 // Special case: pow(x, 2.0) => x * x
1675 Node* base = round_double_node(argument(0));
1676 set_result(_gvn.transform(new MulDNode(base, base)));
1677 return true;
1678 } else if (d->getd() == 0.5 && Matcher::match_rule_supported(Op_SqrtD)) {
1679 // Special case: pow(x, 0.5) => sqrt(x)
1680 Node* base = round_double_node(argument(0));
1681 Node* zero = _gvn.zerocon(T_DOUBLE);
1682
1683 RegionNode* region = new RegionNode(3);
1684 Node* phi = new PhiNode(region, Type::DOUBLE);
1685
1686 Node* cmp = _gvn.transform(new CmpDNode(base, zero));
1687 // According to the API specs, pow(-0.0, 0.5) = 0.0 and sqrt(-0.0) = -0.0.
1688 // So pow(-0.0, 0.5) shouldn't be replaced with sqrt(-0.0).
1689 // -0.0/+0.0 are both excluded since floating-point comparison doesn't distinguish -0.0 from +0.0.
1690 Node* test = _gvn.transform(new BoolNode(cmp, BoolTest::le));
1691
1692 Node* if_pow = generate_slow_guard(test, nullptr);
1693 Node* value_sqrt = _gvn.transform(new SqrtDNode(C, control(), base));
1694 phi->init_req(1, value_sqrt);
1695 region->init_req(1, control());
1696
1697 if (if_pow != nullptr) {
1698 set_control(if_pow);
1699 address target = StubRoutines::dpow() != nullptr ? StubRoutines::dpow() :
1700 CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
1701 const TypePtr* no_memory_effects = nullptr;
1702 Node* trig = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(), target, "POW",
1703 no_memory_effects, base, top(), exp, top());
1704 Node* value_pow = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+0));
1705 #ifdef ASSERT
1706 Node* value_top = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+1));
1707 assert(value_top == top(), "second value must be top");
1708 #endif
1709 phi->init_req(2, value_pow);
1710 region->init_req(2, _gvn.transform(new ProjNode(trig, TypeFunc::Control)));
1711 }
1712
1713 C->set_has_split_ifs(true); // Has chance for split-if optimization
1714 set_control(_gvn.transform(region));
1715 record_for_igvn(region);
1716 set_result(_gvn.transform(phi));
1717
1718 return true;
1719 }
1720 }
1721
1722 return StubRoutines::dpow() != nullptr ?
1723 runtime_math(OptoRuntime::Math_DD_D_Type(), StubRoutines::dpow(), "dpow") :
1724 runtime_math(OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
1725 }
1726
1727 //------------------------------inline_math_native-----------------------------
1728 bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
1729 #define FN_PTR(f) CAST_FROM_FN_PTR(address, f)
1730 switch (id) {
1731 // These intrinsics are not properly supported on all hardware
1732 case vmIntrinsics::_dsin:
1733 return StubRoutines::dsin() != nullptr ?
1734 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dsin(), "dsin") :
1735 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dsin), "SIN");
1736 case vmIntrinsics::_dcos:
1737 return StubRoutines::dcos() != nullptr ?
1738 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dcos(), "dcos") :
1739 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dcos), "COS");
1740 case vmIntrinsics::_dtan:
1741 return StubRoutines::dtan() != nullptr ?
1742 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dtan(), "dtan") :
1743 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dtan), "TAN");
1744 case vmIntrinsics::_dlog:
1745 return StubRoutines::dlog() != nullptr ?
1746 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog(), "dlog") :
1747 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog), "LOG");
1748 case vmIntrinsics::_dlog10:
1749 return StubRoutines::dlog10() != nullptr ?
1750 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog10(), "dlog10") :
1751 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");
1752
1753 // These intrinsics are supported on all hardware
1754 case vmIntrinsics::_ceil:
1755 case vmIntrinsics::_floor:
1756 case vmIntrinsics::_rint: return Matcher::match_rule_supported(Op_RoundDoubleMode) ? inline_double_math(id) : false;
1757 case vmIntrinsics::_dsqrt: return Matcher::match_rule_supported(Op_SqrtD) ? inline_double_math(id) : false;
1758 case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_double_math(id) : false;
1759 case vmIntrinsics::_fabs: return Matcher::match_rule_supported(Op_AbsF) ? inline_math(id) : false;
1760 case vmIntrinsics::_iabs: return Matcher::match_rule_supported(Op_AbsI) ? inline_math(id) : false;
1761 case vmIntrinsics::_labs: return Matcher::match_rule_supported(Op_AbsL) ? inline_math(id) : false;
1762
1763 case vmIntrinsics::_dexp:
1764 return StubRoutines::dexp() != nullptr ?
1765 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dexp(), "dexp") :
1766 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dexp), "EXP");
1767 #undef FN_PTR
1768
1769 case vmIntrinsics::_dpow: return inline_math_pow();
1770 case vmIntrinsics::_dcopySign: return inline_double_math(id);
1771 case vmIntrinsics::_fcopySign: return inline_math(id);
1772 case vmIntrinsics::_dsignum: return Matcher::match_rule_supported(Op_SignumD) ? inline_double_math(id) : false;
1773 case vmIntrinsics::_fsignum: return Matcher::match_rule_supported(Op_SignumF) ? inline_math(id) : false;
1774
1775 // These intrinsics are not yet correctly implemented
1776 case vmIntrinsics::_datan2:
1777 return false;
1778
1779 default:
1780 fatal_unexpected_iid(id);
1781 return false;
1782 }
1783 }
1784
1785 static bool is_simple_name(Node* n) {
1786 return (n->req() == 1 // constant
1787 || (n->is_Type() && n->as_Type()->type()->singleton())
1788 || n->is_Proj() // parameter or return value
1789 || n->is_Phi() // local of some sort
1790 );
1791 }
1792
1793 //----------------------------inline_notify-----------------------------------*
1794 bool LibraryCallKit::inline_notify(vmIntrinsics::ID id) {
1795 const TypeFunc* ftype = OptoRuntime::monitor_notify_Type();
1796 address func;
1797 if (id == vmIntrinsics::_notify) {
1798 func = OptoRuntime::monitor_notify_Java();
1799 } else {
1800 func = OptoRuntime::monitor_notifyAll_Java();
1801 }
1802 Node* call = make_runtime_call(RC_NO_LEAF, ftype, func, nullptr, TypeRawPtr::BOTTOM, argument(0));
1803 make_slow_call_ex(call, env()->Throwable_klass(), false);
1804 return true;
1805 }
1806
1807
1808 //----------------------------inline_min_max-----------------------------------
1809 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
1810 set_result(generate_min_max(id, argument(0), argument(1)));
1811 return true;
1812 }
1813
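// Common shape for the Math.*Exact intrinsics: branch on the overflow flag of
// the math node. The fast (no-overflow) path keeps the result; the slow path
// deoptimizes with reexecution, so the interpreter reruns the bytecode and the
// Java method throws the ArithmeticException itself.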
1814 void LibraryCallKit::inline_math_mathExact(Node* math, Node *test) {
1815 Node* bol = _gvn.transform( new BoolNode(test, BoolTest::overflow) );
1816 IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
1817 Node* fast_path = _gvn.transform( new IfFalseNode(check));
1818 Node* slow_path = _gvn.transform( new IfTrueNode(check) );
1819
1820 {
1821 PreserveJVMState pjvms(this);
1822 PreserveReexecuteState preexecs(this);
1823 jvms()->set_should_reexecute(true);
1824
1825 set_control(slow_path);
1826 set_i_o(i_o());
1827
1828 uncommon_trap(Deoptimization::Reason_intrinsic,
1829 Deoptimization::Action_none);
1830 }
1831
1832 set_control(fast_path);
1833 set_result(math);
1834 }
1835
1836 template <typename OverflowOp>
1837 bool LibraryCallKit::inline_math_overflow(Node* arg1, Node* arg2) {
1838 typedef typename OverflowOp::MathOp MathOp;
1839
1840 MathOp* mathOp = new MathOp(arg1, arg2);
1841 Node* operation = _gvn.transform( mathOp );
1842 Node* ofcheck = _gvn.transform( new OverflowOp(arg1, arg2) );
1843 inline_math_mathExact(operation, ofcheck);
1844 return true;
1845 }
1846
1847 bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
1848 return inline_math_overflow<OverflowAddINode>(argument(0), is_increment ? intcon(1) : argument(1));
1849 }
1850
1851 bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
1852 return inline_math_overflow<OverflowAddLNode>(argument(0), is_increment ? longcon(1) : argument(2));
1853 }
1854
1855 bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
1856 return inline_math_overflow<OverflowSubINode>(argument(0), is_decrement ? intcon(1) : argument(1));
1857 }
1858
1859 bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
1860 return inline_math_overflow<OverflowSubLNode>(argument(0), is_decrement ? longcon(1) : argument(2));
1861 }
1862
1863 bool LibraryCallKit::inline_math_negateExactI() {
1864 return inline_math_overflow<OverflowSubINode>(intcon(0), argument(0));
1865 }
1866
1867 bool LibraryCallKit::inline_math_negateExactL() {
1868 return inline_math_overflow<OverflowSubLNode>(longcon(0), argument(0));
1869 }
1870
1871 bool LibraryCallKit::inline_math_multiplyExactI() {
1872 return inline_math_overflow<OverflowMulINode>(argument(0), argument(1));
1873 }
1874
1875 bool LibraryCallKit::inline_math_multiplyExactL() {
1876 return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
1877 }
1878
1879 bool LibraryCallKit::inline_math_multiplyHigh() {
1880 set_result(_gvn.transform(new MulHiLNode(argument(0), argument(2))));
1881 return true;
1882 }
1883
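// Fold Math.min/max(x, y) without introducing control flow:
//  1. Search up the dominator chain for a comparison that already orders x
//     and y; if one is found, return the winning input outright.
//  2. Otherwise pick the Bool most likely to GVN with existing tests and emit
//     a CMove whose type is sharpened to the tighter of the two int ranges.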
1884 Node*
1885 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
  // These are the candidate return values:
1887 Node* xvalue = x0;
1888 Node* yvalue = y0;
1889
1890 if (xvalue == yvalue) {
1891 return xvalue;
1892 }
1893
1894 bool want_max = (id == vmIntrinsics::_max);
1895
1896 const TypeInt* txvalue = _gvn.type(xvalue)->isa_int();
1897 const TypeInt* tyvalue = _gvn.type(yvalue)->isa_int();
  if (txvalue == nullptr || tyvalue == nullptr)  return top();
1899 // This is not really necessary, but it is consistent with a
1900 // hypothetical MaxINode::Value method:
1901 int widen = MAX2(txvalue->_widen, tyvalue->_widen);
1902
  // %%% This folding logic should (ideally) be in a different place.
  // Some of it belongs inside IfNode, together with a more reliable
  // transformation of ?: style patterns into cmoves.  We also want
  // more powerful optimizations around cmove and min/max.
1907
1908 // Try to find a dominating comparison of these guys.
1909 // It can simplify the index computation for Arrays.copyOf
1910 // and similar uses of System.arraycopy.
1911 // First, compute the normalized version of CmpI(x, y).
1912 int cmp_op = Op_CmpI;
1913 Node* xkey = xvalue;
1914 Node* ykey = yvalue;
1915 Node* ideal_cmpxy = _gvn.transform(new CmpINode(xkey, ykey));
1916 if (ideal_cmpxy->is_Cmp()) {
1917 // E.g., if we have CmpI(length - offset, count),
1918 // it might idealize to CmpI(length, count + offset)
1919 cmp_op = ideal_cmpxy->Opcode();
1920 xkey = ideal_cmpxy->in(1);
1921 ykey = ideal_cmpxy->in(2);
1922 }
1923
1924 // Start by locating any relevant comparisons.
1925 Node* start_from = (xkey->outcnt() < ykey->outcnt()) ? xkey : ykey;
  Node* cmpxy = nullptr;
  Node* cmpyx = nullptr;
1928 for (DUIterator_Fast kmax, k = start_from->fast_outs(kmax); k < kmax; k++) {
1929 Node* cmp = start_from->fast_out(k);
1930 if (cmp->outcnt() > 0 && // must have prior uses
        cmp->in(0) == nullptr &&    // must be context-independent
1932 cmp->Opcode() == cmp_op) { // right kind of compare
1933 if (cmp->in(1) == xkey && cmp->in(2) == ykey) cmpxy = cmp;
1934 if (cmp->in(1) == ykey && cmp->in(2) == xkey) cmpyx = cmp;
1935 }
1936 }
1937
1938 const int NCMPS = 2;
1939 Node* cmps[NCMPS] = { cmpxy, cmpyx };
1940 int cmpn;
1941 for (cmpn = 0; cmpn < NCMPS; cmpn++) {
    if (cmps[cmpn] != nullptr)  break;   // find a result
1943 }
1944 if (cmpn < NCMPS) {
1945 // Look for a dominating test that tells us the min and max.
1946 int depth = 0; // Limit search depth for speed
1947 Node* dom = control();
    for (; dom != nullptr; dom = IfNode::up_one_dom(dom, true)) {
1949 if (++depth >= 100) break;
1950 Node* ifproj = dom;
1951 if (!ifproj->is_Proj()) continue;
1952 Node* iff = ifproj->in(0);
1953 if (!iff->is_If()) continue;
1954 Node* bol = iff->in(1);
1955 if (!bol->is_Bool()) continue;
1956 Node* cmp = bol->in(1);
      if (cmp == nullptr)  continue;
1958 for (cmpn = 0; cmpn < NCMPS; cmpn++)
1959 if (cmps[cmpn] == cmp) break;
1960 if (cmpn == NCMPS) continue;
1961 BoolTest::mask btest = bol->as_Bool()->_test._test;
1962 if (ifproj->is_IfFalse()) btest = BoolTest(btest).negate();
1963 if (cmp->in(1) == ykey) btest = BoolTest(btest).commute();
1964 // At this point, we know that 'x btest y' is true.
1965 switch (btest) {
1966 case BoolTest::eq:
1967 // They are proven equal, so we can collapse the min/max.
1968 // Either value is the answer. Choose the simpler.
1969 if (is_simple_name(yvalue) && !is_simple_name(xvalue))
1970 return yvalue;
1971 return xvalue;
1972 case BoolTest::lt: // x < y
1973 case BoolTest::le: // x <= y
1974 return (want_max ? yvalue : xvalue);
1975 case BoolTest::gt: // x > y
1976 case BoolTest::ge: // x >= y
1977 return (want_max ? xvalue : yvalue);
1978 default:
1979 break;
1980 }
1981 }
1982 }
1983
1984 // We failed to find a dominating test.
1985 // Let's pick a test that might GVN with prior tests.
  Node* best_bol = nullptr;
1987 BoolTest::mask best_btest = BoolTest::illegal;
1988 for (cmpn = 0; cmpn < NCMPS; cmpn++) {
1989 Node* cmp = cmps[cmpn];
    if (cmp == nullptr)  continue;
1991 for (DUIterator_Fast jmax, j = cmp->fast_outs(jmax); j < jmax; j++) {
1992 Node* bol = cmp->fast_out(j);
1993 if (!bol->is_Bool()) continue;
1994 BoolTest::mask btest = bol->as_Bool()->_test._test;
1995 if (btest == BoolTest::eq || btest == BoolTest::ne) continue;
1996 if (cmp->in(1) == ykey) btest = BoolTest(btest).commute();
      if (bol->outcnt() > (best_bol == nullptr ? 0 : best_bol->outcnt())) {
1998 best_bol = bol->as_Bool();
1999 best_btest = btest;
2000 }
2001 }
2002 }
2003
  Node* answer_if_true  = nullptr;
  Node* answer_if_false = nullptr;
2006 switch (best_btest) {
2007 default:
    if (cmpxy == nullptr)
2009 cmpxy = ideal_cmpxy;
2010 best_bol = _gvn.transform(new BoolNode(cmpxy, BoolTest::lt));
2011 // and fall through:
2012 case BoolTest::lt: // x < y
2013 case BoolTest::le: // x <= y
2014 answer_if_true = (want_max ? yvalue : xvalue);
2015 answer_if_false = (want_max ? xvalue : yvalue);
2016 break;
2017 case BoolTest::gt: // x > y
2018 case BoolTest::ge: // x >= y
2019 answer_if_true = (want_max ? xvalue : yvalue);
2020 answer_if_false = (want_max ? yvalue : xvalue);
2021 break;
2022 }
2023
2024 jint hi, lo;
2025 if (want_max) {
2026 // We can sharpen the minimum.
2027 hi = MAX2(txvalue->_hi, tyvalue->_hi);
2028 lo = MAX2(txvalue->_lo, tyvalue->_lo);
2029 } else {
2030 // We can sharpen the maximum.
2031 hi = MIN2(txvalue->_hi, tyvalue->_hi);
2032 lo = MIN2(txvalue->_lo, tyvalue->_lo);
2033 }
2034
2035 // Use a flow-free graph structure, to avoid creating excess control edges
2036 // which could hinder other optimizations.
2037 // Since Math.min/max is often used with arraycopy, we want
2038 // tightly_coupled_allocation to be able to see beyond min/max expressions.
  Node* cmov = CMoveNode::make(nullptr, best_bol,
2040 answer_if_false, answer_if_true,
2041 TypeInt::make(lo, hi, widen));
2042
2043 return _gvn.transform(cmov);
2044
2045 /*
2046 // This is not as desirable as it may seem, since Min and Max
2047 // nodes do not have a full set of optimizations.
2048 // And they would interfere, anyway, with 'if' optimizations
2049 // and with CMoveI canonical forms.
2050 switch (id) {
2051 case vmIntrinsics::_min:
2052 result_val = _gvn.transform(new (C, 3) MinINode(x,y)); break;
2053 case vmIntrinsics::_max:
2054 result_val = _gvn.transform(new (C, 3) MaxINode(x,y)); break;
2055 default:
2056 ShouldNotReachHere();
2057 }
2058 */
2059 }
2060
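// Classify an Unsafe (base, offset) pair; in summary:
//   base is the null constant -> RawPtr (null+long form: offset is the address)
//   base is already a raw ptr -> RawPtr
//   base is a non-null oop, or the offset is provably a small in-object
//     offset, or an oop is being accessed -> OopPtr (on-heap)
//   anything else -> AnyPtr (could be either; callers must stay conservative)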
2061 inline int
2062 LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset, BasicType type) {
2063 const TypePtr* base_type = TypePtr::NULL_PTR;
2064 if (base != nullptr) base_type = _gvn.type(base)->isa_ptr();
2065 if (base_type == nullptr) {
2066 // Unknown type.
2067 return Type::AnyPtr;
2068 } else if (base_type == TypePtr::NULL_PTR) {
2069 // Since this is a null+long form, we have to switch to a rawptr.
2070 base = _gvn.transform(new CastX2PNode(offset));
2071 offset = MakeConX(0);
2072 return Type::RawPtr;
2073 } else if (base_type->base() == Type::RawPtr) {
2074 return Type::RawPtr;
2075 } else if (base_type->isa_oopptr()) {
2076 // Base is never null => always a heap address.
2077 if (!TypePtr::NULL_PTR->higher_equal(base_type)) {
2078 return Type::OopPtr;
2079 }
2080 // Offset is small => always a heap address.
2081 const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2082 if (offset_type != nullptr &&
2083 base_type->offset() == 0 && // (should always be?)
2084 offset_type->_lo >= 0 &&
2085 !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2086 return Type::OopPtr;
2087 } else if (type == T_OBJECT) {
      // An off-heap access to an oop doesn't make any sense; it has to be
      // on-heap.
2090 return Type::OopPtr;
2091 }
2092 // Otherwise, it might either be oop+off or null+addr.
2093 return Type::AnyPtr;
2094 } else {
2095 // No information:
2096 return Type::AnyPtr;
2097 }
2098 }
2099
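// Turn a classified (base, offset) pair into an address Node. For AnyPtr,
// speculative profiling may let us commit to on-heap (cast the base non-null)
// or off-heap (null_assert the base and use the offset as a raw address);
// failing that, fall back to a raw-memory CheckCastPP that forgoes oop-based
// optimizations.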
2100 Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, BasicType type, bool can_cast) {
2101 Node* uncasted_base = base;
2102 int kind = classify_unsafe_addr(uncasted_base, offset, type);
2103 if (kind == Type::RawPtr) {
2104 return basic_plus_adr(top(), uncasted_base, offset);
2105 } else if (kind == Type::AnyPtr) {
2106 assert(base == uncasted_base, "unexpected base change");
2107 if (can_cast) {
2108 if (!_gvn.type(base)->speculative_maybe_null() &&
2109 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
        // According to profiling, this access is always on heap. Casting the
        // base to non-null, and thus avoiding membars around the access,
        // should allow better optimizations.
2113 Node* null_ctl = top();
2114 base = null_check_oop(base, &null_ctl, true, true, true);
2115 assert(null_ctl->is_top(), "no null control here");
2116 return basic_plus_adr(base, offset);
2117 } else if (_gvn.type(base)->speculative_always_null() &&
2118 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
2119 // According to profiling, this access is always off
2120 // heap.
2121 base = null_assert(base);
2122 Node* raw_base = _gvn.transform(new CastX2PNode(offset));
2123 offset = MakeConX(0);
2124 return basic_plus_adr(top(), raw_base, offset);
2125 }
2126 }
2127 // We don't know if it's an on heap or off heap access. Fall back
2128 // to raw memory access.
2129 Node* raw = _gvn.transform(new CheckCastPPNode(control(), base, TypeRawPtr::BOTTOM));
2130 return basic_plus_adr(top(), raw, offset);
2131 } else {
2132 assert(base == uncasted_base, "unexpected base change");
2133 // We know it's an on heap access so base can't be null
2134 if (TypePtr::NULL_PTR->higher_equal(_gvn.type(base))) {
2135 base = must_be_not_null(base, true);
2136 }
2137 return basic_plus_adr(base, offset);
2138 }
2139 }
2140
2141 //--------------------------inline_number_methods-----------------------------
2142 // inline int Integer.numberOfLeadingZeros(int)
2143 // inline int Long.numberOfLeadingZeros(long)
2144 //
2145 // inline int Integer.numberOfTrailingZeros(int)
2146 // inline int Long.numberOfTrailingZeros(long)
2147 //
2148 // inline int Integer.bitCount(int)
2149 // inline int Long.bitCount(long)
2150 //
2151 // inline char Character.reverseBytes(char)
2152 // inline short Short.reverseBytes(short)
2153 // inline int Integer.reverseBytes(int)
2154 // inline long Long.reverseBytes(long)
2155 bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
2156 Node* arg = argument(0);
2157 Node* n = nullptr;
2158 switch (id) {
2159 case vmIntrinsics::_numberOfLeadingZeros_i: n = new CountLeadingZerosINode( arg); break;
2160 case vmIntrinsics::_numberOfLeadingZeros_l: n = new CountLeadingZerosLNode( arg); break;
2161 case vmIntrinsics::_numberOfTrailingZeros_i: n = new CountTrailingZerosINode(arg); break;
2162 case vmIntrinsics::_numberOfTrailingZeros_l: n = new CountTrailingZerosLNode(arg); break;
2163 case vmIntrinsics::_bitCount_i: n = new PopCountINode( arg); break;
2164 case vmIntrinsics::_bitCount_l: n = new PopCountLNode( arg); break;
2165 case vmIntrinsics::_reverseBytes_c: n = new ReverseBytesUSNode(0, arg); break;
2166 case vmIntrinsics::_reverseBytes_s: n = new ReverseBytesSNode( 0, arg); break;
2167 case vmIntrinsics::_reverseBytes_i: n = new ReverseBytesINode( 0, arg); break;
2168 case vmIntrinsics::_reverseBytes_l: n = new ReverseBytesLNode( 0, arg); break;
2169 default: fatal_unexpected_iid(id); break;
2170 }
2171 set_result(_gvn.transform(n));
2172 return true;
2173 }
2174
2175 //----------------------------inline_unsafe_access----------------------------
2176
2177 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2178 // Attempt to infer a sharper value type from the offset and base type.
2179 ciKlass* sharpened_klass = nullptr;
2180
2181 // See if it is an instance field, with an object type.
2182 if (alias_type->field() != nullptr) {
2183 if (alias_type->field()->type()->is_klass()) {
2184 sharpened_klass = alias_type->field()->type()->as_klass();
2185 }
2186 }
2187
2188 // See if it is a narrow oop array.
2189 if (adr_type->isa_aryptr()) {
2190 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2191 const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
2192 if (elem_type != nullptr) {
2193 sharpened_klass = elem_type->klass();
2194 }
2195 }
2196 }
2197
  // The sharpened class might be unloaded if there is no class loader
  // constraint in place.
2200 if (sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2201 const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2202
2203 #ifndef PRODUCT
2204 if (C->print_intrinsics() || C->print_inlining()) {
2205 tty->print(" from base type: "); adr_type->dump(); tty->cr();
2206 tty->print(" sharpened value: "); tjp->dump(); tty->cr();
2207 }
2208 #endif
2209 // Sharpen the value type.
2210 return tjp;
2211 }
2212 return nullptr;
2213 }
2214
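// Map an Unsafe access kind onto C2's memory-ordering decorators. Note the
// naming skew: a plain (Relaxed) access maps to MO_UNORDERED, while a
// Java-level "opaque" access maps to MO_RELAXED.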
2215 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2216 switch (kind) {
2217 case Relaxed:
2218 return MO_UNORDERED;
2219 case Opaque:
2220 return MO_RELAXED;
2221 case Acquire:
2222 return MO_ACQUIRE;
2223 case Release:
2224 return MO_RELEASE;
2225 case Volatile:
2226 return MO_SEQ_CST;
2227 default:
2228 ShouldNotReachHere();
2229 return 0;
2230 }
2231 }
2232
2233 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2234 if (callee()->is_static()) return false; // caller must have the capability!
2235 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2236 guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2237 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2238 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2239
2240 if (is_reference_type(type)) {
2241 decorators |= ON_UNKNOWN_OOP_REF;
2242 }
2243
2244 if (unaligned) {
2245 decorators |= C2_UNALIGNED;
2246 }
2247
2248 #ifndef PRODUCT
2249 {
2250 ResourceMark rm;
2251 // Check the signatures.
2252 ciSignature* sig = callee()->signature();
2253 #ifdef ASSERT
2254 if (!is_store) {
2255 // Object getReference(Object base, int/long offset), etc.
2256 BasicType rtype = sig->return_type()->basic_type();
2257 assert(rtype == type, "getter must return the expected value");
2258 assert(sig->count() == 2, "oop getter has 2 arguments");
2259 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2260 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2261 } else {
2262 // void putReference(Object base, int/long offset, Object x), etc.
2263 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2264 assert(sig->count() == 3, "oop putter has 3 arguments");
2265 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2266 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2267 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2268 assert(vtype == type, "putter must accept the expected value");
2269 }
2270 #endif // ASSERT
2271 }
2272 #endif //PRODUCT
2273
2274 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2275
2276 Node* receiver = argument(0); // type: oop
2277
2278 // Build address expression.
2279 Node* heap_base_oop = top();
2280
2281 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2282 Node* base = argument(1); // type: oop
2283 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2284 Node* offset = argument(2); // type: long
2285 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2286 // to be plain byte offsets, which are also the same as those accepted
2287 // by oopDesc::field_addr.
2288 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2289 "fieldOffset must be byte-scaled");
2290 // 32-bit machines ignore the high half!
2291 offset = ConvL2X(offset);
2292
2293 // Save state and restore on bailout
2294 uint old_sp = sp();
2295 SafePointNode* old_map = clone_map();
2296
2297 Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2298
2299 if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2300 if (type != T_OBJECT) {
2301 decorators |= IN_NATIVE; // off-heap primitive access
2302 } else {
2303 set_map(old_map);
2304 set_sp(old_sp);
2305 return false; // off-heap oop accesses are not supported
2306 }
2307 } else {
2308 heap_base_oop = base; // on-heap or mixed access
2309 }
2310
2311 // Can base be null? Otherwise, always on-heap access.
2312 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2313
2314 if (!can_access_non_heap) {
2315 decorators |= IN_HEAP;
2316 }
2317
2318 Node* val = is_store ? argument(4) : nullptr;
2319
2320 const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2321 if (adr_type == TypePtr::NULL_PTR) {
2322 set_map(old_map);
2323 set_sp(old_sp);
2324 return false; // off-heap access with zero address
2325 }
2326
2327 // Try to categorize the address.
2328 Compile::AliasType* alias_type = C->alias_type(adr_type);
2329 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2330
2331 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2332 alias_type->adr_type() == TypeAryPtr::RANGE) {
2333 set_map(old_map);
2334 set_sp(old_sp);
2335 return false; // not supported
2336 }
2337
2338 bool mismatched = false;
2339 BasicType bt = alias_type->basic_type();
2340 if (bt != T_ILLEGAL) {
2341 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2342 if (bt == T_BYTE && adr_type->isa_aryptr()) {
      // Alias type doesn't differentiate between byte[] and boolean[].
2344 // Use address type to get the element type.
2345 bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2346 }
2347 if (bt == T_ARRAY || bt == T_NARROWOOP) {
2348 // accessing an array field with getReference is not a mismatch
2349 bt = T_OBJECT;
2350 }
2351 if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2352 // Don't intrinsify mismatched object accesses
2353 set_map(old_map);
2354 set_sp(old_sp);
2355 return false;
2356 }
2357 mismatched = (bt != type);
2358 } else if (alias_type->adr_type()->isa_oopptr()) {
2359 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2360 }
2361
2362 destruct_map_clone(old_map);
2363 assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2364
2365 if (mismatched) {
2366 decorators |= C2_MISMATCHED;
2367 }
2368
2369 // First guess at the value type.
2370 const Type *value_type = Type::get_const_basic_type(type);
2371
2372 // Figure out the memory ordering.
2373 decorators |= mo_decorator_for_access_kind(kind);
2374
2375 if (!is_store && type == T_OBJECT) {
2376 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2377 if (tjp != nullptr) {
2378 value_type = tjp;
2379 }
2380 }
2381
2382 receiver = null_check(receiver);
2383 if (stopped()) {
2384 return true;
2385 }
2386 // Heap pointers get a null-check from the interpreter,
2387 // as a courtesy. However, this is not guaranteed by Unsafe,
2388 // and it is not possible to fully distinguish unintended nulls
2389 // from intended ones in this API.
2390
2391 if (!is_store) {
2392 Node* p = nullptr;
2393 // Try to constant fold a load from a constant field
2394 ciField* field = alias_type->field();
2395 if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) {
2396 // final or stable field
2397 p = make_constant_from_field(field, heap_base_oop);
2398 }
2399
2400 if (p == nullptr) { // Could not constant fold the load
2401 p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2402 // Normalize the value returned by getBoolean in the following cases
2403 if (type == T_BOOLEAN &&
2404 (mismatched ||
2405 heap_base_oop == top() || // - heap_base_oop is null or
2406 (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
          // and the unsafe access is made at a large offset
2408 // (i.e., larger than the maximum offset necessary for any
2409 // field access)
2410 ) {
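        // Coerce any nonzero byte observed through such an access to exactly
        // 1 (Java 'true'): normalized_result = (p != 0) ? 1 : p.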
2411 IdealKit ideal = IdealKit(this);
2412 #define __ ideal.
2413 IdealVariable normalized_result(ideal);
2414 __ declarations_done();
2415 __ set(normalized_result, p);
2416 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2417 __ set(normalized_result, ideal.ConI(1));
2418 ideal.end_if();
2419 final_sync(ideal);
2420 p = __ value(normalized_result);
2421 #undef __
2422 }
2423 }
2424 if (type == T_ADDRESS) {
2425 p = gvn().transform(new CastP2XNode(nullptr, p));
2426 p = ConvX2UL(p);
2427 }
2428 // The load node has the control of the preceding MemBarCPUOrder. All
2429 // following nodes will have the control of the MemBarCPUOrder inserted at
2430 // the end of this method. So, pushing the load onto the stack at a later
2431 // point is fine.
2432 set_result(p);
2433 } else {
2434 if (bt == T_ADDRESS) {
2435 // Repackage the long as a pointer.
2436 val = ConvL2X(val);
2437 val = gvn().transform(new CastX2PNode(val));
2438 }
2439 access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2440 }
2441
2442 return true;
2443 }
2444
2445 //----------------------------inline_unsafe_load_store----------------------------
2446 // This method serves a couple of different customers (depending on LoadStoreKind):
2447 //
2448 // LS_cmp_swap:
2449 //
2450 // boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2451 // boolean compareAndSetInt( Object o, long offset, int expected, int x);
2452 // boolean compareAndSetLong( Object o, long offset, long expected, long x);
2453 //
2454 // LS_cmp_swap_weak:
2455 //
2456 // boolean weakCompareAndSetReference( Object o, long offset, Object expected, Object x);
2457 // boolean weakCompareAndSetReferencePlain( Object o, long offset, Object expected, Object x);
2458 // boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2459 // boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2460 //
2461 // boolean weakCompareAndSetInt( Object o, long offset, int expected, int x);
2462 // boolean weakCompareAndSetIntPlain( Object o, long offset, int expected, int x);
2463 // boolean weakCompareAndSetIntAcquire( Object o, long offset, int expected, int x);
2464 // boolean weakCompareAndSetIntRelease( Object o, long offset, int expected, int x);
2465 //
2466 // boolean weakCompareAndSetLong( Object o, long offset, long expected, long x);
2467 // boolean weakCompareAndSetLongPlain( Object o, long offset, long expected, long x);
2468 // boolean weakCompareAndSetLongAcquire( Object o, long offset, long expected, long x);
2469 // boolean weakCompareAndSetLongRelease( Object o, long offset, long expected, long x);
2470 //
2471 // LS_cmp_exchange:
2472 //
2473 // Object compareAndExchangeReferenceVolatile(Object o, long offset, Object expected, Object x);
2474 // Object compareAndExchangeReferenceAcquire( Object o, long offset, Object expected, Object x);
2475 // Object compareAndExchangeReferenceRelease( Object o, long offset, Object expected, Object x);
2476 //
//   int    compareAndExchangeIntVolatile(      Object o, long offset, int expected, int x);
//   int    compareAndExchangeIntAcquire(       Object o, long offset, int expected, int x);
//   int    compareAndExchangeIntRelease(       Object o, long offset, int expected, int x);
//
//   long   compareAndExchangeLongVolatile(     Object o, long offset, long expected, long x);
//   long   compareAndExchangeLongAcquire(      Object o, long offset, long expected, long x);
//   long   compareAndExchangeLongRelease(      Object o, long offset, long expected, long x);
2484 //
2485 // LS_get_add:
2486 //
2487 // int getAndAddInt( Object o, long offset, int delta)
2488 // long getAndAddLong(Object o, long offset, long delta)
2489 //
2490 // LS_get_set:
2491 //
2492 // int getAndSet(Object o, long offset, int newValue)
2493 // long getAndSet(Object o, long offset, long newValue)
2494 // Object getAndSet(Object o, long offset, Object newValue)
2495 //
2496 bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadStoreKind kind, const AccessKind access_kind) {
2497 // This basic scheme here is the same as inline_unsafe_access, but
2498 // differs in enough details that combining them would make the code
2499 // overly confusing. (This is a true fact! I originally combined
2500 // them, but even I was confused by it!) As much code/comments as
2501 // possible are retained from inline_unsafe_access though to make
2502 // the correspondences clearer. - dl
2503
2504 if (callee()->is_static()) return false; // caller must have the capability!
2505
2506 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2507 decorators |= mo_decorator_for_access_kind(access_kind);
2508
2509 #ifndef PRODUCT
2510 BasicType rtype;
2511 {
2512 ResourceMark rm;
2513 // Check the signatures.
2514 ciSignature* sig = callee()->signature();
2515 rtype = sig->return_type()->basic_type();
2516 switch(kind) {
2517 case LS_get_add:
2518 case LS_get_set: {
2519 // Check the signatures.
2520 #ifdef ASSERT
2521 assert(rtype == type, "get and set must return the expected type");
2522 assert(sig->count() == 3, "get and set has 3 arguments");
2523 assert(sig->type_at(0)->basic_type() == T_OBJECT, "get and set base is object");
2524 assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
2525 assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
2526 assert(access_kind == Volatile, "mo is not passed to intrinsic nodes in current implementation");
2527 #endif // ASSERT
2528 break;
2529 }
2530 case LS_cmp_swap:
2531 case LS_cmp_swap_weak: {
2532 // Check the signatures.
2533 #ifdef ASSERT
2534 assert(rtype == T_BOOLEAN, "CAS must return boolean");
2535 assert(sig->count() == 4, "CAS has 4 arguments");
2536 assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2537 assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2538 #endif // ASSERT
2539 break;
2540 }
2541 case LS_cmp_exchange: {
2542 // Check the signatures.
2543 #ifdef ASSERT
2544 assert(rtype == type, "CAS must return the expected type");
2545 assert(sig->count() == 4, "CAS has 4 arguments");
2546 assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2547 assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2548 #endif // ASSERT
2549 break;
2550 }
2551 default:
2552 ShouldNotReachHere();
2553 }
2554 }
2555 #endif //PRODUCT
2556
2557 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2558
2559 // Get arguments:
2560 Node* receiver = nullptr;
2561 Node* base = nullptr;
2562 Node* offset = nullptr;
2563 Node* oldval = nullptr;
2564 Node* newval = nullptr;
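  // Note: the argument() indices below count JVM stack slots, not formals.
  // E.g. for boolean compareAndSetLong(Object o, long offset, long expected, long x)
  // the incoming slots are 0:receiver, 1:o, 2-3:offset, 4-5:expected, 6-7:x,
  // which is why oldval is argument(4) and a two-slot newval is argument(6).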
2565 switch(kind) {
2566 case LS_cmp_swap:
2567 case LS_cmp_swap_weak:
2568 case LS_cmp_exchange: {
2569 const bool two_slot_type = type2size[type] == 2;
2570 receiver = argument(0); // type: oop
2571 base = argument(1); // type: oop
2572 offset = argument(2); // type: long
2573 oldval = argument(4); // type: oop, int, or long
2574 newval = argument(two_slot_type ? 6 : 5); // type: oop, int, or long
2575 break;
2576 }
2577 case LS_get_add:
2578 case LS_get_set: {
2579 receiver = argument(0); // type: oop
2580 base = argument(1); // type: oop
2581 offset = argument(2); // type: long
2582 oldval = nullptr;
2583 newval = argument(4); // type: oop, int, or long
2584 break;
2585 }
2586 default:
2587 ShouldNotReachHere();
2588 }
2589
2590 // Build field offset expression.
2591 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2592 // to be plain byte offsets, which are also the same as those accepted
2593 // by oopDesc::field_addr.
2594 assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2595 // 32-bit machines ignore the high half of long offsets
2596 offset = ConvL2X(offset);
2597 // Save state and restore on bailout
2598 uint old_sp = sp();
2599 SafePointNode* old_map = clone_map();
  Node* adr = make_unsafe_address(base, offset, type, false);
2601 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2602
2603 Compile::AliasType* alias_type = C->alias_type(adr_type);
2604 BasicType bt = alias_type->basic_type();
2605 if (bt != T_ILLEGAL &&
2606 (is_reference_type(bt) != (type == T_OBJECT))) {
2607 // Don't intrinsify mismatched object accesses.
2608 set_map(old_map);
2609 set_sp(old_sp);
2610 return false;
2611 }
2612
2613 destruct_map_clone(old_map);
2614
2615 // For CAS, unlike inline_unsafe_access, there seems no point in
2616 // trying to refine types. Just use the coarse types here.
2617 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2618 const Type *value_type = Type::get_const_basic_type(type);
2619
2620 switch (kind) {
2621 case LS_get_set:
2622 case LS_cmp_exchange: {
2623 if (type == T_OBJECT) {
2624 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2625 if (tjp != nullptr) {
2626 value_type = tjp;
2627 }
2628 }
2629 break;
2630 }
2631 case LS_cmp_swap:
2632 case LS_cmp_swap_weak:
2633 case LS_get_add:
2634 break;
2635 default:
2636 ShouldNotReachHere();
2637 }
2638
2639 // Null check receiver.
2640 receiver = null_check(receiver);
2641 if (stopped()) {
2642 return true;
2643 }
2644
2645 int alias_idx = C->get_alias_index(adr_type);
2646
2647 if (is_reference_type(type)) {
2648 decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2649
2650 // Transformation of a value which could be null pointer (CastPP #null)
2651 // could be delayed during Parse (for example, in adjust_map_after_if()).
2652 // Execute transformation here to avoid barrier generation in such case.
2653 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2654 newval = _gvn.makecon(TypePtr::NULL_PTR);
2655
2656 if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2657 // Refine the value to a null constant, when it is known to be null
2658 oldval = _gvn.makecon(TypePtr::NULL_PTR);
2659 }
2660 }
2661
2662 Node* result = nullptr;
2663 switch (kind) {
2664 case LS_cmp_exchange: {
2665 result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2666 oldval, newval, value_type, type, decorators);
2667 break;
2668 }
2669 case LS_cmp_swap_weak:
2670 decorators |= C2_WEAK_CMPXCHG;
2671 case LS_cmp_swap: {
2672 result = access_atomic_cmpxchg_bool_at(base, adr, adr_type, alias_idx,
2673 oldval, newval, value_type, type, decorators);
2674 break;
2675 }
2676 case LS_get_set: {
2677 result = access_atomic_xchg_at(base, adr, adr_type, alias_idx,
2678 newval, value_type, type, decorators);
2679 break;
2680 }
2681 case LS_get_add: {
2682 result = access_atomic_add_at(base, adr, adr_type, alias_idx,
2683 newval, value_type, type, decorators);
2684 break;
2685 }
2686 default:
2687 ShouldNotReachHere();
2688 }
2689
2690 assert(type2size[result->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2691 set_result(result);
2692 return true;
2693 }
2694
2695 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
2696 // Regardless of form, don't allow previous ld/st to move down,
2697 // then issue acquire, release, or volatile mem_bar.
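  // Rough mapping to the usual barrier terms: loadFence ~ acquire
  // (LoadLoad|LoadStore), storeFence ~ release (StoreStore|LoadStore),
  // storeStoreFence ~ StoreStore only, fullFence ~ a full two-way barrier.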
2698 insert_mem_bar(Op_MemBarCPUOrder);
2699 switch(id) {
2700 case vmIntrinsics::_loadFence:
2701 insert_mem_bar(Op_LoadFence);
2702 return true;
2703 case vmIntrinsics::_storeFence:
2704 insert_mem_bar(Op_StoreFence);
2705 return true;
2706 case vmIntrinsics::_storeStoreFence:
2707 insert_mem_bar(Op_StoreStoreFence);
2708 return true;
2709 case vmIntrinsics::_fullFence:
2710 insert_mem_bar(Op_MemBarVolatile);
2711 return true;
2712 default:
2713 fatal_unexpected_iid(id);
2714 return false;
2715 }
2716 }
2717
2718 bool LibraryCallKit::inline_onspinwait() {
2719 insert_mem_bar(Op_OnSpinWait);
2720 return true;
2721 }
2722
2723 bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
2724 if (!kls->is_Con()) {
2725 return true;
2726 }
2727 const TypeKlassPtr* klsptr = kls->bottom_type()->isa_klassptr();
2728 if (klsptr == nullptr) {
2729 return true;
2730 }
2731 ciInstanceKlass* ik = klsptr->klass()->as_instance_klass();
2732 // don't need a guard for a klass that is already initialized
2733 return !ik->is_initialized();
2734 }
2735
2736 //----------------------------inline_unsafe_writeback0-------------------------
2737 // public native void Unsafe.writeback0(long address)
2738 bool LibraryCallKit::inline_unsafe_writeback0() {
2739 if (!Matcher::has_match_rule(Op_CacheWB)) {
2740 return false;
2741 }
2742 #ifndef PRODUCT
2743 assert(Matcher::has_match_rule(Op_CacheWBPreSync), "found match rule for CacheWB but not CacheWBPreSync");
2744 assert(Matcher::has_match_rule(Op_CacheWBPostSync), "found match rule for CacheWB but not CacheWBPostSync");
2745 ciSignature* sig = callee()->signature();
2746 assert(sig->type_at(0)->basic_type() == T_LONG, "Unsafe_writeback0 address is long!");
2747 #endif
2748 null_check_receiver(); // null-check, then ignore
2749 Node *addr = argument(1);
2750 addr = new CastX2PNode(addr);
2751 addr = _gvn.transform(addr);
2752 Node *flush = new CacheWBNode(control(), memory(TypeRawPtr::BOTTOM), addr);
2753 flush = _gvn.transform(flush);
2754 set_memory(flush, TypeRawPtr::BOTTOM);
2755 return true;
2756 }
2757
//------------------------inline_unsafe_writebackSync0-------------------------
// public native void Unsafe.writebackPreSync0();
// public native void Unsafe.writebackPostSync0();
2760 bool LibraryCallKit::inline_unsafe_writebackSync0(bool is_pre) {
2761 if (is_pre && !Matcher::has_match_rule(Op_CacheWBPreSync)) {
2762 return false;
2763 }
2764 if (!is_pre && !Matcher::has_match_rule(Op_CacheWBPostSync)) {
2765 return false;
2766 }
2767 #ifndef PRODUCT
2768 assert(Matcher::has_match_rule(Op_CacheWB),
2769 (is_pre ? "found match rule for CacheWBPreSync but not CacheWB"
2770 : "found match rule for CacheWBPostSync but not CacheWB"));
2771
2772 #endif
2773 null_check_receiver(); // null-check, then ignore
2774 Node *sync;
2775 if (is_pre) {
2776 sync = new CacheWBPreSyncNode(control(), memory(TypeRawPtr::BOTTOM));
2777 } else {
2778 sync = new CacheWBPostSyncNode(control(), memory(TypeRawPtr::BOTTOM));
2779 }
2780 sync = _gvn.transform(sync);
2781 set_memory(sync, TypeRawPtr::BOTTOM);
2782 return true;
2783 }
2784
2785 //----------------------------inline_unsafe_allocate---------------------------
2786 // public native Object Unsafe.allocateInstance(Class<?> cls);
2787 bool LibraryCallKit::inline_unsafe_allocate() {
2788 if (callee()->is_static()) return false; // caller must have the capability!
2789
2790 null_check_receiver(); // null-check, then ignore
2791 Node* cls = null_check(argument(1));
2792 if (stopped()) return true;
2793
2794 Node* kls = load_klass_from_mirror(cls, false, nullptr, 0);
2795 kls = null_check(kls);
2796 if (stopped()) return true; // argument was like int.class
2797
2798 Node* test = nullptr;
2799 if (LibraryCallKit::klass_needs_init_guard(kls)) {
2800 // Note: The argument might still be an illegal value like
2801 // Serializable.class or Object[].class. The runtime will handle it.
2802 // But we must make an explicit check for initialization.
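    // The guard computes (init_state - fully_initialized); any nonzero
    // result steers new_instance() into its slow path, where the runtime
    // performs (or waits for) the class initialization.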
2803 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2804 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2805 // can generate code to load it as unsigned byte.
2806 Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
2807 Node* bits = intcon(InstanceKlass::fully_initialized);
2808 test = _gvn.transform(new SubINode(inst, bits));
2809 // The 'test' is non-zero if we need to take a slow path.
2810 }
2811
2812 Node* obj = new_instance(kls, test);
2813 set_result(obj);
2814 return true;
2815 }
2816
2817 //------------------------inline_native_time_funcs--------------
2818 // inline code for System.currentTimeMillis() and System.nanoTime()
2819 // these have the same type and signature
2820 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2821 const TypeFunc* tf = OptoRuntime::void_long_Type();
2822 const TypePtr* no_memory_effects = nullptr;
2823 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2824 Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2825 #ifdef ASSERT
2826 Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2827 assert(value_top == top(), "second value must be top");
2828 #endif
2829 set_result(value);
2830 return true;
2831 }
2832
2833 #ifdef JFR_HAVE_INTRINSICS
2834
2835 /**
2836 * if oop->klass != null
2837 * // normal class
2838 * epoch = _epoch_state ? 2 : 1
 *     if ((oop->klass->trace_id & ((epoch << META_SHIFT) | epoch)) != epoch) {
2840 * ... // enter slow path when the klass is first recorded or the epoch of JFR shifts
2841 * }
2842 * id = oop->klass->trace_id >> TRACE_ID_SHIFT // normal class path
2843 * else
2844 * // primitive class
2845 * if oop->array_klass != null
2846 * id = (oop->array_klass->trace_id >> TRACE_ID_SHIFT) + 1 // primitive class path
2847 * else
2848 * id = LAST_TYPE_ID + 1 // void class path
2849 * if (!signaled)
2850 * signaled = true
2851 */
2852 bool LibraryCallKit::inline_native_classID() {
2853 Node* cls = argument(0);
2854
2855 IdealKit ideal(this);
2856 #define __ ideal.
2857 IdealVariable result(ideal); __ declarations_done();
2858 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(),
2859 basic_plus_adr(cls, java_lang_Class::klass_offset()),
                                                  TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));

2863 __ if_then(kls, BoolTest::ne, null()); {
2864 Node* kls_trace_id_addr = basic_plus_adr(kls, in_bytes(KLASS_TRACE_ID_OFFSET));
    Node* kls_trace_id_raw = ideal.load(ideal.ctrl(), kls_trace_id_addr, TypeLong::LONG, T_LONG, Compile::AliasIdxRaw);
2866
2867 Node* epoch_address = makecon(TypeRawPtr::make(Jfr::epoch_address()));
2868 Node* epoch = ideal.load(ideal.ctrl(), epoch_address, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw);
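    // _epoch_state is 0 or 1, so the shift below yields the 1 or 2 of the
    // pseudocode above ("epoch = _epoch_state ? 2 : 1").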
2869 epoch = _gvn.transform(new LShiftLNode(longcon(1), epoch));
2870 Node* mask = _gvn.transform(new LShiftLNode(epoch, intcon(META_SHIFT)));
2871 mask = _gvn.transform(new OrLNode(mask, epoch));
2872 Node* kls_trace_id_raw_and_mask = _gvn.transform(new AndLNode(kls_trace_id_raw, mask));
2873
2874 float unlikely = PROB_UNLIKELY(0.999);
2875 __ if_then(kls_trace_id_raw_and_mask, BoolTest::ne, epoch, unlikely); {
2876 sync_kit(ideal);
2877 make_runtime_call(RC_LEAF,
2878 OptoRuntime::get_class_id_intrinsic_Type(),
2879 CAST_FROM_FN_PTR(address, Jfr::get_class_id_intrinsic),
2880 "get_class_id_intrinsic",
2881 TypePtr::BOTTOM,
2882 kls);
2883 ideal.sync_kit(this);
2884 } __ end_if();
2885
2886 ideal.set(result, _gvn.transform(new URShiftLNode(kls_trace_id_raw, ideal.ConI(TRACE_ID_SHIFT))));
2887 } __ else_(); {
2888 Node* array_kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(),
2889 basic_plus_adr(cls, java_lang_Class::array_klass_offset()),
2890 TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
2891 __ if_then(array_kls, BoolTest::ne, null()); {
2892 Node* array_kls_trace_id_addr = basic_plus_adr(array_kls, in_bytes(KLASS_TRACE_ID_OFFSET));
2893 Node* array_kls_trace_id_raw = ideal.load(ideal.ctrl(), array_kls_trace_id_addr, TypeLong::LONG, T_LONG, Compile::AliasIdxRaw);
2894 Node* array_kls_trace_id = _gvn.transform(new URShiftLNode(array_kls_trace_id_raw, ideal.ConI(TRACE_ID_SHIFT)));
2895 ideal.set(result, _gvn.transform(new AddLNode(array_kls_trace_id, longcon(1))));
2896 } __ else_(); {
2897 // void class case
2898 ideal.set(result, _gvn.transform(longcon(LAST_TYPE_ID + 1)));
2899 } __ end_if();
2900
2901 Node* signaled_flag_address = makecon(TypeRawPtr::make(Jfr::signal_address()));
2902 Node* signaled = ideal.load(ideal.ctrl(), signaled_flag_address, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw, true, MemNode::acquire);
2903 __ if_then(signaled, BoolTest::ne, ideal.ConI(1)); {
2904 ideal.store(ideal.ctrl(), signaled_flag_address, ideal.ConI(1), T_BOOLEAN, Compile::AliasIdxRaw, MemNode::release, true);
2905 } __ end_if();
2906 } __ end_if();
2907
2908 final_sync(ideal);
2909 set_result(ideal.value(result));
2910 #undef __
2911 return true;
2912 }
2913
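//---------------------inline_native_getEventWriter------------------------
// Load the thread-local JFR event writer handle from the current thread and
// resolve it; the result is null when no writer has been installed yet.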
2914 bool LibraryCallKit::inline_native_getEventWriter() {
2915 Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
2916
2917 Node* jobj_ptr = basic_plus_adr(top(), tls_ptr,
2918 in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR));
2919
2920 Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
2921
2922 Node* jobj_cmp_null = _gvn.transform( new CmpPNode(jobj, null()) );
2923 Node* test_jobj_eq_null = _gvn.transform( new BoolNode(jobj_cmp_null, BoolTest::eq) );
2924
2925 IfNode* iff_jobj_null =
2926 create_and_map_if(control(), test_jobj_eq_null, PROB_MIN, COUNT_UNKNOWN);
2927
2928 enum { _normal_path = 1,
2929 _null_path = 2,
2930 PATH_LIMIT };
2931
2932 RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
2933 PhiNode* result_val = new PhiNode(result_rgn, TypeInstPtr::BOTTOM);
2934
2935 Node* jobj_is_null = _gvn.transform(new IfTrueNode(iff_jobj_null));
2936 result_rgn->init_req(_null_path, jobj_is_null);
2937 result_val->init_req(_null_path, null());
2938
2939 Node* jobj_is_not_null = _gvn.transform(new IfFalseNode(iff_jobj_null));
2940 set_control(jobj_is_not_null);
2941 Node* res = access_load(jobj, TypeInstPtr::NOTNULL, T_OBJECT,
2942 IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
2943 result_rgn->init_req(_normal_path, control());
2944 result_val->init_req(_normal_path, res);
2945
2946 set_result(result_rgn, result_val);
2947
2948 return true;
2949 }
2950
2951 #endif // JFR_HAVE_INTRINSICS
2952
2953 //------------------------inline_native_currentThread------------------
2954 bool LibraryCallKit::inline_native_currentThread() {
2955 Node* junk = nullptr;
2956 set_result(generate_current_thread(junk));
2957 return true;
2958 }
2959
2960 //---------------------------load_mirror_from_klass----------------------------
2961 // Given a klass oop, load its java mirror (a java.lang.Class oop).
2962 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
2963 Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
2964 Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
2965 // mirror = ((OopHandle)mirror)->resolve();
2966 return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
2967 }
2968
2969 //-----------------------load_klass_from_mirror_common-------------------------
2970 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
2971 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
2972 // and branch to the given path on the region.
2973 // If never_see_null, take an uncommon trap on null, so we can optimistically
2974 // compile for the non-null case.
2975 // If the region is null, force never_see_null = true.
2976 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
2977 bool never_see_null,
2978 RegionNode* region,
2979 int null_path,
2980 int offset) {
2981 if (region == nullptr) never_see_null = true;
2982 Node* p = basic_plus_adr(mirror, offset);
2983 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
2984 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
2985 Node* null_ctl = top();
2986 kls = null_check_oop(kls, &null_ctl, never_see_null);
2987 if (region != nullptr) {
    // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
2989 region->init_req(null_path, null_ctl);
2990 } else {
2991 assert(null_ctl == top(), "no loose ends");
2992 }
2993 return kls;
2994 }
2995
2996 //--------------------(inline_native_Class_query helpers)---------------------
2997 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
2998 // Fall through if (mods & mask) == bits, take the guard otherwise.
2999 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3000 // Branch around if the given klass has the given modifier bit set.
3001 // Like generate_guard, adds a new path onto the region.
3002 Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3003 Node* mods = make_load(nullptr, modp, TypeInt::INT, T_INT, MemNode::unordered);
3004 Node* mask = intcon(modifier_mask);
3005 Node* bits = intcon(modifier_bits);
3006 Node* mbit = _gvn.transform(new AndINode(mods, mask));
3007 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
3008 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3009 return generate_fair_guard(bol, region);
3010 }
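// E.g. generate_interface_guard below passes mask=JVM_ACC_INTERFACE, bits=0:
// control falls through for a plain class, and the returned guard path is
// taken when the klass is an interface.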
3011 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3012 return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3013 }
3014 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3015 return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3016 }
3017
3018 //-------------------------inline_native_Class_query-------------------
3019 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3020 const Type* return_type = TypeInt::BOOL;
3021 Node* prim_return_value = top(); // what happens if it's a primitive class?
3022 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3023 bool expect_prim = false; // most of these guys expect to work on refs
3024
3025 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3026
3027 Node* mirror = argument(0);
3028 Node* obj = top();
3029
3030 switch (id) {
3031 case vmIntrinsics::_isInstance:
3032 // nothing is an instance of a primitive type
3033 prim_return_value = intcon(0);
3034 obj = argument(1);
3035 break;
3036 case vmIntrinsics::_getModifiers:
3037 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3038 assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line");
3039 return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin);
3040 break;
3041 case vmIntrinsics::_isInterface:
3042 prim_return_value = intcon(0);
3043 break;
3044 case vmIntrinsics::_isArray:
3045 prim_return_value = intcon(0);
3046 expect_prim = true; // cf. ObjectStreamClass.getClassSignature
3047 break;
3048 case vmIntrinsics::_isPrimitive:
3049 prim_return_value = intcon(1);
3050 expect_prim = true; // obviously
3051 break;
3052 case vmIntrinsics::_isHidden:
3053 prim_return_value = intcon(0);
3054 break;
3055 case vmIntrinsics::_getSuperclass:
3056 prim_return_value = null();
3057 return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
3058 break;
3059 case vmIntrinsics::_getClassAccessFlags:
3060 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3061 return_type = TypeInt::INT; // not bool! 6297094
3062 break;
3063 default:
3064 fatal_unexpected_iid(id);
3065 break;
3066 }
3067
3068 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3069 if (mirror_con == nullptr) return false; // cannot happen?
3070
3071 #ifndef PRODUCT
3072 if (C->print_intrinsics() || C->print_inlining()) {
3073 ciType* k = mirror_con->java_mirror_type();
3074 if (k) {
3075 tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
3076 k->print_name();
3077 tty->cr();
3078 }
3079 }
3080 #endif
3081
3082 // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive).
3083 RegionNode* region = new RegionNode(PATH_LIMIT);
3084 record_for_igvn(region);
3085 PhiNode* phi = new PhiNode(region, return_type);
3086
  // The mirror will never be null for Reflection.getClassAccessFlags, however
  // it may be null for Class.isInstance or Class.getModifiers. Throw an NPE
  // if it is. See bug 4774291.
3090
3091 // For Reflection.getClassAccessFlags(), the null check occurs in
3092 // the wrong place; see inline_unsafe_access(), above, for a similar
3093 // situation.
3094 mirror = null_check(mirror);
3095 // If mirror or obj is dead, only null-path is taken.
3096 if (stopped()) return true;
3097
3098 if (expect_prim) never_see_null = false; // expect nulls (meaning prims)
3099
3100 // Now load the mirror's klass metaobject, and null-check it.
3101 // Side-effects region with the control path if the klass is null.
3102 Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path);
3103 // If kls is null, we have a primitive mirror.
3104 phi->init_req(_prim_path, prim_return_value);
3105 if (stopped()) { set_result(region, phi); return true; }
3106 bool safe_for_replace = (region->in(_prim_path) == top());
3107
3108 Node* p; // handy temp
3109 Node* null_ctl;
3110
3111 // Now that we have the non-null klass, we can perform the real query.
3112 // For constant classes, the query will constant-fold in LoadNode::Value.
3113 Node* query_value = top();
3114 switch (id) {
3115 case vmIntrinsics::_isInstance:
3116 // nothing is an instance of a primitive type
3117 query_value = gen_instanceof(obj, kls, safe_for_replace);
3118 break;
3119
3120 case vmIntrinsics::_getModifiers:
3121 p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
3122 query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
3123 break;
3124
3125 case vmIntrinsics::_isInterface:
3126 // (To verify this code sequence, check the asserts in JVM_IsInterface.)
3127 if (generate_interface_guard(kls, region) != nullptr)
3128 // A guard was added. If the guard is taken, it was an interface.
3129 phi->add_req(intcon(1));
3130 // If we fall through, it's a plain class.
3131 query_value = intcon(0);
3132 break;
3133
3134 case vmIntrinsics::_isArray:
3135 // (To verify this code sequence, check the asserts in JVM_IsArrayClass.)
3136 if (generate_array_guard(kls, region) != nullptr)
3137 // A guard was added. If the guard is taken, it was an array.
3138 phi->add_req(intcon(1));
3139 // If we fall through, it's a plain class.
3140 query_value = intcon(0);
3141 break;
3142
3143 case vmIntrinsics::_isPrimitive:
3144 query_value = intcon(0); // "normal" path produces false
3145 break;
3146
3147 case vmIntrinsics::_isHidden:
3148 // (To verify this code sequence, check the asserts in JVM_IsHiddenClass.)
3149 if (generate_hidden_class_guard(kls, region) != nullptr)
      // A guard was added. If the guard is taken, it was a hidden class.
3151 phi->add_req(intcon(1));
3152 // If we fall through, it's a plain class.
3153 query_value = intcon(0);
3154 break;
3155
3156
3157 case vmIntrinsics::_getSuperclass:
3158 // The rules here are somewhat unfortunate, but we can still do better
3159 // with random logic than with a JNI call.
3160 // Interfaces store null or Object as _super, but must report null.
3161 // Arrays store an intermediate super as _super, but must report Object.
3162 // Other types can report the actual _super.
3163 // (To verify this code sequence, check the asserts in JVM_IsInterface.)
3164 if (generate_interface_guard(kls, region) != nullptr)
3165 // A guard was added. If the guard is taken, it was an interface.
3166 phi->add_req(null());
3167 if (generate_array_guard(kls, region) != nullptr)
3168 // A guard was added. If the guard is taken, it was an array.
3169 phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
3170 // If we fall through, it's a plain class. Get its _super.
3171 p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
3172 kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
3173 null_ctl = top();
3174 kls = null_check_oop(kls, &null_ctl);
3175 if (null_ctl != top()) {
3176 // If the guard is taken, Object.superClass is null (both klass and mirror).
3177 region->add_req(null_ctl);
3178 phi ->add_req(null());
3179 }
3180 if (!stopped()) {
3181 query_value = load_mirror_from_klass(kls);
3182 }
3183 break;
3184
3185 case vmIntrinsics::_getClassAccessFlags:
3186 p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3187 query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
3188 break;
3189
3190 default:
3191 fatal_unexpected_iid(id);
3192 break;
3193 }
3194
3195 // Fall-through is the normal case of a query to a real class.
3196 phi->init_req(1, query_value);
3197 region->init_req(1, control());
3198
3199 C->set_has_split_ifs(true); // Has chance for split-if optimization
3200 set_result(region, phi);
3201 return true;
3202 }
3203
3204 //-------------------------inline_Class_cast-------------------
3205 bool LibraryCallKit::inline_Class_cast() {
3206 Node* mirror = argument(0); // Class
3207 Node* obj = argument(1);
3208 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3209 if (mirror_con == nullptr) {
3210 return false; // dead path (mirror->is_top()).
3211 }
3212 if (obj == nullptr || obj->is_top()) {
3213 return false; // dead path
3214 }
3215 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
3216
3217 // First, see if Class.cast() can be folded statically.
3218 // java_mirror_type() returns non-null for compile-time Class constants.
3219 ciType* tm = mirror_con->java_mirror_type();
3220 if (tm != nullptr && tm->is_klass() &&
      tp != nullptr && tp->klass() != nullptr) {
3222 if (!tp->klass()->is_loaded()) {
3223 // Don't use intrinsic when class is not loaded.
3224 return false;
3225 } else {
3226 int static_res = C->static_subtype_check(tm->as_klass(), tp->klass());
3227 if (static_res == Compile::SSC_always_true) {
3228 // isInstance() is true - fold the code.
3229 set_result(obj);
3230 return true;
3231 } else if (static_res == Compile::SSC_always_false) {
3232 // Don't use intrinsic, have to throw ClassCastException.
3233 // If the reference is null, the non-intrinsic bytecode will
3234 // be optimized appropriately.
3235 return false;
3236 }
3237 }
3238 }
3239
3240 // Bailout intrinsic and do normal inlining if exception path is frequent.
3241 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3242 return false;
3243 }
3244
3245 // Generate dynamic checks.
  // Class.cast() is the Java implementation of the _checkcast bytecode.
3247 // Do checkcast (Parse::do_checkcast()) optimizations here.
3248
3249 mirror = null_check(mirror);
3250 // If mirror is dead, only null-path is taken.
3251 if (stopped()) {
3252 return true;
3253 }
3254
3255 // Not-subtype or the mirror's klass ptr is null (in case it is a primitive).
3256 enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
3257 RegionNode* region = new RegionNode(PATH_LIMIT);
3258 record_for_igvn(region);
3259
3260 // Now load the mirror's klass metaobject, and null-check it.
3261 // If kls is null, we have a primitive mirror and
3262 // nothing is an instance of a primitive type.
3263 Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
3264
3265 Node* res = top();
3266 if (!stopped()) {
3267 Node* bad_type_ctrl = top();
3268 // Do checkcast optimizations.
3269 res = gen_checkcast(obj, kls, &bad_type_ctrl);
3270 region->init_req(_bad_type_path, bad_type_ctrl);
3271 }
3272 if (region->in(_prim_path) != top() ||
3273 region->in(_bad_type_path) != top()) {
3274 // Let Interpreter throw ClassCastException.
3275 PreserveJVMState pjvms(this);
3276 set_control(_gvn.transform(region));
3277 uncommon_trap(Deoptimization::Reason_intrinsic,
3278 Deoptimization::Action_maybe_recompile);
3279 }
3280 if (!stopped()) {
3281 set_result(res);
3282 }
3283 return true;
3284 }
3285
3286
3287 //--------------------------inline_native_subtype_check------------------------
3288 // This intrinsic takes the JNI calls out of the heart of
3289 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3290 bool LibraryCallKit::inline_native_subtype_check() {
3291 // Pull both arguments off the stack.
3292 Node* args[2]; // two java.lang.Class mirrors: superc, subc
3293 args[0] = argument(0);
3294 args[1] = argument(1);
3295 Node* klasses[2]; // corresponding Klasses: superk, subk
3296 klasses[0] = klasses[1] = top();
3297
3298 enum {
3299 // A full decision tree on {superc is prim, subc is prim}:
3300 _prim_0_path = 1, // {P,N} => false
3301 // {P,P} & superc!=subc => false
3302 _prim_same_path, // {P,P} & superc==subc => true
3303 _prim_1_path, // {N,P} => false
3304 _ref_subtype_path, // {N,N} & subtype check wins => true
3305 _both_ref_path, // {N,N} & subtype check loses => false
3306 PATH_LIMIT
3307 };
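  // E.g. String.class.isAssignableFrom(int.class) exits via _prim_1_path
  // (false), while int.class.isAssignableFrom(int.class) takes the
  // _prim_same_path (true).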
3308
3309 RegionNode* region = new RegionNode(PATH_LIMIT);
3310 Node* phi = new PhiNode(region, TypeInt::BOOL);
3311 record_for_igvn(region);
3312
3313 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
3314 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3315 int class_klass_offset = java_lang_Class::klass_offset();
3316
3317 // First null-check both mirrors and load each mirror's klass metaobject.
3318 int which_arg;
3319 for (which_arg = 0; which_arg <= 1; which_arg++) {
3320 Node* arg = args[which_arg];
3321 arg = null_check(arg);
3322 if (stopped()) break;
3323 args[which_arg] = arg;
3324
3325 Node* p = basic_plus_adr(arg, class_klass_offset);
3326 Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
3327 klasses[which_arg] = _gvn.transform(kls);
3328 }
3329
3330 // Having loaded both klasses, test each for null.
3331 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3332 for (which_arg = 0; which_arg <= 1; which_arg++) {
3333 Node* kls = klasses[which_arg];
3334 Node* null_ctl = top();
3335 kls = null_check_oop(kls, &null_ctl, never_see_null);
3336 int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
3337 region->init_req(prim_path, null_ctl);
3338 if (stopped()) break;
3339 klasses[which_arg] = kls;
3340 }
3341
3342 if (!stopped()) {
3343 // now we have two reference types, in klasses[0..1]
3344 Node* subk = klasses[1]; // the argument to isAssignableFrom
3345 Node* superk = klasses[0]; // the receiver
3346 region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3347 // now we have a successful reference subtype check
3348 region->set_req(_ref_subtype_path, control());
3349 }
3350
3351 // If both operands are primitive (both klasses null), then
3352 // we must return true when they are identical primitives.
3353 // It is convenient to test this after the first null klass check.
3354 set_control(region->in(_prim_0_path)); // go back to first null check
3355 if (!stopped()) {
3356 // Since superc is primitive, make a guard for the superc==subc case.
3357 Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
3358 Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
3359 generate_guard(bol_eq, region, PROB_FAIR);
3360 if (region->req() == PATH_LIMIT+1) {
3361 // A guard was added. If the added guard is taken, superc==subc.
3362 region->swap_edges(PATH_LIMIT, _prim_same_path);
3363 region->del_req(PATH_LIMIT);
3364 }
3365 region->set_req(_prim_0_path, control()); // Not equal after all.
3366 }
3367
3368 // these are the only paths that produce 'true':
3369 phi->set_req(_prim_same_path, intcon(1));
3370 phi->set_req(_ref_subtype_path, intcon(1));
3371
3372 // pull together the cases:
3373 assert(region->req() == PATH_LIMIT, "sane region");
3374 for (uint i = 1; i < region->req(); i++) {
3375 Node* ctl = region->in(i);
3376 if (ctl == nullptr || ctl == top()) {
3377 region->set_req(i, top());
3378 phi ->set_req(i, top());
3379 } else if (phi->in(i) == nullptr) {
3380 phi->set_req(i, intcon(0)); // all other paths produce 'false'
3381 }
3382 }
3383
3384 set_control(_gvn.transform(region));
3385 set_result(_gvn.transform(phi));
3386 return true;
3387 }
3388
3389 //---------------------generate_array_guard_common------------------------
3390 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
3391 bool obj_array, bool not_array) {
3392
3393 if (stopped()) {
3394 return nullptr;
3395 }
3396
  // If obj_array/not_array==false/false:
  // Branch around if the given klass is in fact an array (either obj or prim).
  // If obj_array/not_array==false/true:
  // Branch around if the given klass is not an array klass of any kind.
  // If obj_array/not_array==true/true:
  // Branch around if the kls is not an oop array (kls is int[], String, etc.)
  // If obj_array/not_array==true/false:
  // Branch around if the kls is an oop array (Object[] or subtype)
3405 //
3406 // Like generate_guard, adds a new path onto the region.
3407 jint layout_con = 0;
3408 Node* layout_val = get_layout_helper(kls, layout_con);
3409 if (layout_val == nullptr) {
3410 bool query = (obj_array
3411 ? Klass::layout_helper_is_objArray(layout_con)
3412 : Klass::layout_helper_is_array(layout_con));
3413 if (query == not_array) {
3414 return nullptr; // never a branch
3415 } else { // always a branch
3416 Node* always_branch = control();
3417 if (region != nullptr)
3418 region->add_req(always_branch);
3419 set_control(top());
3420 return always_branch;
3421 }
3422 }
3423 // Now test the correct condition.
3424 jint nval = (obj_array
3425 ? (jint)(Klass::_lh_array_tag_type_value
3426 << Klass::_lh_array_tag_shift)
3427 : Klass::_lh_neutral_value);
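  // Array layout helpers are negative, with the array tag in the sign bits,
  // and the objArray tag sorts below the typeArray tag; hence a signed "lt"
  // compare against 0 tests is_array, and against the typeArray tag tests
  // is_objArray.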
3428 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
3429 BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array
3430 // invert the test if we are looking for a non-array
3431 if (not_array) btest = BoolTest(btest).negate();
3432 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
3433 return generate_fair_guard(bol, region);
3434 }
3435
3436
3437 //-----------------------inline_native_newArray--------------------------
// private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
3439 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
3440 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
3441 Node* mirror;
3442 Node* count_val;
3443 if (uninitialized) {
3444 mirror = argument(1);
3445 count_val = argument(2);
3446 } else {
3447 mirror = argument(0);
3448 count_val = argument(1);
3449 }
3450
3451 mirror = null_check(mirror);
3452 // If mirror or obj is dead, only null-path is taken.
3453 if (stopped()) return true;
3454
3455 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3456 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3457 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
3458 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
3459 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
3460
3461 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3462 Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
3463 result_reg, _slow_path);
3464 Node* normal_ctl = control();
3465 Node* no_array_ctl = result_reg->in(_slow_path);
3466
3467 // Generate code for the slow case. We make a call to newArray().
3468 set_control(no_array_ctl);
3469 if (!stopped()) {
3470 // Either the input type is void.class, or else the
3471 // array klass has not yet been cached. Either the
3472 // ensuing call will throw an exception, or else it
3473 // will cache the array klass for next time.
3474 PreserveJVMState pjvms(this);
3475 CallJavaNode* slow_call = nullptr;
3476 if (uninitialized) {
3477 // Generate optimized virtual call (holder class 'Unsafe' is final)
3478 slow_call = generate_method_call(vmIntrinsics::_allocateUninitializedArray, false, false, true);
3479 } else {
3480 slow_call = generate_method_call_static(vmIntrinsics::_newArray, true);
3481 }
3482 Node* slow_result = set_results_for_java_call(slow_call);
3483 // this->control() comes from set_results_for_java_call
3484 result_reg->set_req(_slow_path, control());
3485 result_val->set_req(_slow_path, slow_result);
3486 result_io ->set_req(_slow_path, i_o());
3487 result_mem->set_req(_slow_path, reset_memory());
3488 }
3489
3490 set_control(normal_ctl);
3491 if (!stopped()) {
3492 // Normal case: The array type has been cached in the java.lang.Class.
3493 // The following call works fine even if the array type is polymorphic.
3494 // It could be a dynamic mix of int[], boolean[], Object[], etc.
3495 Node* obj = new_array(klass_node, count_val, 0); // no arguments to push
3496 result_reg->init_req(_normal_path, control());
3497 result_val->init_req(_normal_path, obj);
3498 result_io ->init_req(_normal_path, i_o());
3499 result_mem->init_req(_normal_path, reset_memory());
3500
3501 if (uninitialized) {
3502 // Mark the allocation so that zeroing is skipped
3503 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(obj, &_gvn);
3504 alloc->maybe_set_complete(&_gvn);
3505 }
3506 }
3507
3508 // Return the combined state.
3509 set_i_o( _gvn.transform(result_io) );
3510 set_all_memory( _gvn.transform(result_mem));
3511
3512 C->set_has_split_ifs(true); // Has chance for split-if optimization
3513 set_result(result_reg, result_val);
3514 return true;
3515 }
3516
3517 //----------------------inline_native_getLength--------------------------
3518 // public static native int java.lang.reflect.Array.getLength(Object array);
3519 bool LibraryCallKit::inline_native_getLength() {
3520 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
3521
3522 Node* array = null_check(argument(0));
3523 // If array is dead, only null-path is taken.
3524 if (stopped()) return true;
3525
3526 // Deoptimize if it is a non-array.
3527 Node* non_array = generate_non_array_guard(load_object_klass(array), nullptr);
3528
3529 if (non_array != nullptr) {
3530 PreserveJVMState pjvms(this);
3531 set_control(non_array);
3532 uncommon_trap(Deoptimization::Reason_intrinsic,
3533 Deoptimization::Action_maybe_recompile);
3534 }
3535
3536 // If control is dead, only non-array-path is taken.
3537 if (stopped()) return true;
3538
  // This works fine even if the array type is polymorphic.
3540 // It could be a dynamic mix of int[], boolean[], Object[], etc.
3541 Node* result = load_array_length(array);
3542
3543 C->set_has_split_ifs(true); // Has chance for split-if optimization
3544 set_result(result);
3545 return true;
3546 }
3547
3548 //------------------------inline_array_copyOf----------------------------
3549 // public static <T,U> T[] java.util.Arrays.copyOf( U[] original, int newLength, Class<? extends T[]> newType);
3550 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from, int to, Class<? extends T[]> newType);
3551 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
3552 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
3553
3554 // Get the arguments.
3555 Node* original = argument(0);
3556 Node* start = is_copyOfRange? argument(1): intcon(0);
3557 Node* end = is_copyOfRange? argument(2): argument(1);
3558 Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
3559
3560 Node* newcopy = nullptr;
3561
3562 // Set the original stack and the reexecute bit for the interpreter to reexecute
3563 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3564 { PreserveReexecuteState preexecs(this);
3565 jvms()->set_should_reexecute(true);
3566
3567 array_type_mirror = null_check(array_type_mirror);
3568 original = null_check(original);
3569
3570 // Check if a null path was taken unconditionally.
3571 if (stopped()) return true;
3572
3573 Node* orig_length = load_array_length(original);
3574
3575 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
3576 klass_node = null_check(klass_node);
3577
3578 RegionNode* bailout = new RegionNode(1);
3579 record_for_igvn(bailout);
3580
3581 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3582 // Bail out if that is so.
3583 Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
3584 if (not_objArray != nullptr) {
3585 // Improve the klass node's type from the new optimistic assumption:
3586 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3587 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
3588 Node* cast = new CastPPNode(klass_node, akls);
3589 cast->init_req(0, control());
3590 klass_node = _gvn.transform(cast);
3591 }
3592
3593 // Bail out if either start or end is negative.
3594 generate_negative_guard(start, bailout, &start);
3595 generate_negative_guard(end, bailout, &end);
3596
3597 Node* length = end;
3598 if (_gvn.type(start) != TypeInt::ZERO) {
3599 length = _gvn.transform(new SubINode(end, start));
3600 }
3601
3602 // Bail out if length is negative.
3603 // Without this the new_array would throw
3604 // NegativeArraySizeException but IllegalArgumentException is what
3605 // should be thrown
3606 generate_negative_guard(length, bailout, &length);
3607
3608 if (bailout->req() > 1) {
3609 PreserveJVMState pjvms(this);
3610 set_control(_gvn.transform(bailout));
3611 uncommon_trap(Deoptimization::Reason_intrinsic,
3612 Deoptimization::Action_maybe_recompile);
3613 }
3614
3615 if (!stopped()) {
3616 // How many elements will we copy from the original?
3617 // The answer is MinI(orig_length - start, length).
3618 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
3619 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
3620
3621 // Generate a direct call to the right arraycopy function(s).
3622 // We know the copy is disjoint but we might not know if the
3623 // oop stores need checking.
3624 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
3625 // This will fail a store-check if x contains any non-nulls.
3626
3627 // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
3628 // loads/stores but it is legal only if we're sure the
3629 // Arrays.copyOf would succeed. So we need all input arguments
3630 // to the copyOf to be validated, including that the copy to the
3631 // new array won't trigger an ArrayStoreException. That subtype
3632 // check can be optimized if we know something on the type of
3633 // the input array from type speculation.
3634 if (_gvn.type(klass_node)->singleton()) {
3635 ciKlass* subk = _gvn.type(load_object_klass(original))->is_klassptr()->klass();
3636 ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
3637
3638 int test = C->static_subtype_check(superk, subk);
3639 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
3640 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
3641 if (t_original->speculative_type() != nullptr) {
3642 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
3643 }
3644 }
3645 }
3646
3647 bool validated = false;
3648 // Reason_class_check rather than Reason_intrinsic because we
3649 // want to intrinsify even if this traps.
3650 if (!too_many_traps(Deoptimization::Reason_class_check)) {
3651 Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
3652
3653 if (not_subtype_ctrl != top()) {
3654 PreserveJVMState pjvms(this);
3655 set_control(not_subtype_ctrl);
3656 uncommon_trap(Deoptimization::Reason_class_check,
3657 Deoptimization::Action_make_not_entrant);
3658 assert(stopped(), "Should be stopped");
3659 }
3660 validated = true;
3661 }
3662
3663 if (!stopped()) {
3664 newcopy = new_array(klass_node, length, 0); // no arguments to push
3665
3666 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
3667 load_object_klass(original), klass_node);
3668 if (!is_copyOfRange) {
3669 ac->set_copyof(validated);
3670 } else {
3671 ac->set_copyofrange(validated);
3672 }
3673 Node* n = _gvn.transform(ac);
3674 if (n == ac) {
3675 ac->connect_outputs(this);
3676 } else {
3677 assert(validated, "shouldn't transform if all arguments not validated");
3678 set_all_memory(n);
3679 }
3680 }
3681 }
3682 } // original reexecute is set back here
3683
3684 C->set_has_split_ifs(true); // Has chance for split-if optimization
3685 if (!stopped()) {
3686 set_result(newcopy);
3687 }
3688 return true;
3689 }
3690
3691
3692 //----------------------generate_virtual_guard---------------------------
3693 // Helper for hashCode and clone. Peeks inside the vtable to avoid a call.
3694 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
3695 RegionNode* slow_region) {
3696 ciMethod* method = callee();
3697 int vtable_index = method->vtable_index();
3698 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
3699 "bad index %d", vtable_index);
3700 // Get the Method* out of the appropriate vtable entry.
3701 int entry_offset = in_bytes(Klass::vtable_start_offset()) +
3702 vtable_index*vtableEntry::size_in_bytes() +
3703 vtableEntry::method_offset_in_bytes();
3704 Node* entry_addr = basic_plus_adr(obj_klass, entry_offset);
3705 Node* target_call = make_load(nullptr, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3706
3707 // Compare the target method with the expected method (e.g., Object.hashCode).
3708 const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
3709
3710 Node* native_call = makecon(native_call_addr);
3711 Node* chk_native = _gvn.transform(new CmpPNode(target_call, native_call));
3712 Node* test_native = _gvn.transform(new BoolNode(chk_native, BoolTest::ne));
3713
3714 return generate_slow_guard(test_native, slow_region);
3715 }
3716
3717 //-----------------------generate_method_call----------------------------
3718 // Use generate_method_call to make a slow-call to the real
3719 // method if the fast path fails. An alternative would be to
3720 // use a stub like OptoRuntime::slow_arraycopy_Java.
3721 // This only works for expanding the current library call,
3722 // not another intrinsic. (E.g., don't use this for making an
3723 // arraycopy call inside of the copyOf intrinsic.)
3724 CallJavaNode*
3725 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
3726 // When compiling the intrinsic method itself, do not use this technique.
3727 guarantee(callee() != C->method(), "cannot make slow-call to self");
3728
3729 ciMethod* method = callee();
3730 // ensure the JVMS we have will be correct for this call
3731 guarantee(method_id == method->intrinsic_id(), "must match");
3732
3733 const TypeFunc* tf = TypeFunc::make(method);
3734 if (res_not_null) {
3735 assert(tf->return_type() == T_OBJECT, "");
3736 const TypeTuple* range = tf->range();
3737 const Type** fields = TypeTuple::fields(range->cnt());
3738 fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
3739 const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
3740 tf = TypeFunc::make(tf->domain(), new_range);
3741 }
3742 CallJavaNode* slow_call;
3743 if (is_static) {
3744 assert(!is_virtual, "");
3745 slow_call = new CallStaticJavaNode(C, tf,
3746 SharedRuntime::get_resolve_static_call_stub(), method);
3747 } else if (is_virtual) {
3748 null_check_receiver();
3749 int vtable_index = Method::invalid_vtable_index;
3750 if (UseInlineCaches) {
3751 // Suppress the vtable call
3752 } else {
      // hashCode and clone are not miranda methods,
      // so the vtable index is fixed.
3755 // No need to use the linkResolver to get it.
3756 vtable_index = method->vtable_index();
3757 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
3758 "bad index %d", vtable_index);
3759 }
3760 slow_call = new CallDynamicJavaNode(tf,
3761 SharedRuntime::get_resolve_virtual_call_stub(),
3762 method, vtable_index);
3763 } else { // neither virtual nor static: opt_virtual
3764 null_check_receiver();
3765 slow_call = new CallStaticJavaNode(C, tf,
3766 SharedRuntime::get_resolve_opt_virtual_call_stub(), method);
3767 slow_call->set_optimized_virtual(true);
3768 }
3769 if (CallGenerator::is_inlined_method_handle_intrinsic(this->method(), bci(), callee())) {
3770 // To be able to issue a direct call (optimized virtual or virtual)
3771 // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
3772 // about the method being invoked should be attached to the call site to
3773 // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
3774 slow_call->set_override_symbolic_info(true);
3775 }
3776 set_arguments_for_java_call(slow_call);
3777 set_edges_for_java_call(slow_call);
3778 return slow_call;
3779 }
3780
3781
3782 /**
3783 * Build special case code for calls to hashCode on an object. This call may
3784 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
3785 * slightly different code.
3786 */
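// Fast-path sketch (a simplification of the node graph built below):
//   mark = obj->mark();
//   if ((mark & lock_mask) != unlocked_value) goto slow;  // header locked/biased
//   hash = (int)(mark >>> hash_shift) & hash_mask;
//   if (hash == no_hash)                      goto slow;  // hash not installed yet
//   return hash;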
3787 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
3788 assert(is_static == callee()->is_static(), "correct intrinsic selection");
3789 assert(!(is_virtual && is_static), "either virtual, special, or static");
3790
3791 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
3792
3793 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3794 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
3795 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
3796 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
3797 Node* obj = nullptr;
3798 if (!is_static) {
3799 // Check for hashing null object
3800 obj = null_check_receiver();
3801 if (stopped()) return true; // unconditionally null
3802 result_reg->init_req(_null_path, top());
3803 result_val->init_req(_null_path, top());
3804 } else {
3805 // Do a null check, and return zero if null.
3806 // System.identityHashCode(null) == 0
3807 obj = argument(0);
3808 Node* null_ctl = top();
3809 obj = null_check_oop(obj, &null_ctl);
3810 result_reg->init_req(_null_path, null_ctl);
3811 result_val->init_req(_null_path, _gvn.intcon(0));
3812 }
3813
3814 // Unconditionally null? Then return right away.
3815 if (stopped()) {
3816 set_control( result_reg->in(_null_path));
3817 if (!stopped())
3818 set_result(result_val->in(_null_path));
3819 return true;
3820 }
3821
3822 // We only go to the fast case code if we pass a number of guards. The
3823 // paths which do not pass are accumulated in the slow_region.
3824 RegionNode* slow_region = new RegionNode(1);
3825 record_for_igvn(slow_region);
3826
3827 // If this is a virtual call, we generate a funny guard. We pull out
3828 // the vtable entry corresponding to hashCode() from the target object.
3829 // If the target method which we are calling happens to be the native
3830 // Object hashCode() method, we pass the guard. We do not need this
3831 // guard for non-virtual calls -- the caller is known to be the native
3832 // Object hashCode().
3833 if (is_virtual) {
3834 // After null check, get the object's klass.
3835 Node* obj_klass = load_object_klass(obj);
3836 generate_virtual_guard(obj_klass, slow_region);
3837 }
3838
3839 // Get the header out of the object, use LoadMarkNode when available
3840 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3841 // The control of the load must be null. Otherwise, the load can move before
3842 // the null check after castPP removal.
3843 Node* no_ctrl = nullptr;
3844 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
3845
3846 // Test the header to see if it is unlocked.
3847 Node *lock_mask = _gvn.MakeConX(markWord::biased_lock_mask_in_place);
3848 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
3849 Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
3850 Node *chk_unlocked = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
3851 Node *test_unlocked = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
3852
3853 generate_slow_guard(test_unlocked, slow_region);
3854
3855 // Get the hash value and check to see that it has been properly assigned.
3856 // We depend on hash_mask being at most 32 bits and avoid the use of
3857 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
3858 // vm: see markWord.hpp.
3859 Node *hash_mask = _gvn.intcon(markWord::hash_mask);
3860 Node *hash_shift = _gvn.intcon(markWord::hash_shift);
3861 Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
3862 // This hack lets the hash bits live anywhere in the mark object now, as long
3863 // as the shift drops the relevant bits into the low 32 bits. Note that
3864 // Java spec says that HashCode is an int so there's no point in capturing
3865 // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
3866 hshifted_header = ConvX2I(hshifted_header);
3867 Node *hash_val = _gvn.transform(new AndINode(hshifted_header, hash_mask));
3868
3869 Node *no_hash_val = _gvn.intcon(markWord::no_hash);
3870 Node *chk_assigned = _gvn.transform(new CmpINode( hash_val, no_hash_val));
3871 Node *test_assigned = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
3872
3873 generate_slow_guard(test_assigned, slow_region);
3874
3875 Node* init_mem = reset_memory();
3876 // fill in the rest of the null path:
3877 result_io ->init_req(_null_path, i_o());
3878 result_mem->init_req(_null_path, init_mem);
3879
3880 result_val->init_req(_fast_path, hash_val);
3881 result_reg->init_req(_fast_path, control());
3882 result_io ->init_req(_fast_path, i_o());
3883 result_mem->init_req(_fast_path, init_mem);
3884
3885 // Generate code for the slow case. We make a call to hashCode().
3886 set_control(_gvn.transform(slow_region));
3887 if (!stopped()) {
3888 // No need for PreserveJVMState, because we're using up the present state.
3889 set_all_memory(init_mem);
3890 vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
3891 CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static, false);
3892 Node* slow_result = set_results_for_java_call(slow_call);
3893 // this->control() comes from set_results_for_java_call
3894 result_reg->init_req(_slow_path, control());
3895 result_val->init_req(_slow_path, slow_result);
3896 result_io ->set_req(_slow_path, i_o());
3897 result_mem ->set_req(_slow_path, reset_memory());
3898 }
3899
3900 // Return the combined state.
3901 set_i_o( _gvn.transform(result_io) );
3902 set_all_memory( _gvn.transform(result_mem));
3903
3904 set_result(result_reg, result_val);
3905 return true;
3906 }
3907
3908 //---------------------------inline_native_getClass----------------------------
3909 // public final native Class<?> java.lang.Object.getClass();
3910 //
3911 // Build special case code for calls to getClass on an object.
3912 bool LibraryCallKit::inline_native_getClass() {
3913 Node* obj = null_check_receiver();
3914 if (stopped()) return true;
3915 set_result(load_mirror_from_klass(load_object_klass(obj)));
3916 return true;
3917 }
3918
3919 //-----------------inline_native_Reflection_getCallerClass---------------------
3920 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
3921 //
3922 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
3923 //
3924 // NOTE: This code must perform the same logic as JVM_GetCallerClass
3925 // in that it must skip particular security frames and checks for
3926 // caller sensitive methods.
3927 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
3928 #ifndef PRODUCT
3929 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
3930 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
3931 }
3932 #endif
3933
3934 if (!jvms()->has_method()) {
3935 #ifndef PRODUCT
3936 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
3937 tty->print_cr(" Bailing out because intrinsic was inlined at top level");
3938 }
3939 #endif
3940 return false;
3941 }
3942
3943 // Walk back up the JVM state to find the caller at the required
3944 // depth.
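  // For example, in MethodHandles.lookup() -> Reflection.getCallerClass(),
  // frame n == 1 is lookup() itself (declared @CallerSensitive) and frame
  // n == 2 is lookup()'s caller, whose class is the result.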
3945 JVMState* caller_jvms = jvms();
3946
3947 // Cf. JVM_GetCallerClass
3948 // NOTE: Start the loop at depth 1 because the current JVM state does
3949 // not include the Reflection.getCallerClass() frame.
3950 for (int n = 1; caller_jvms != nullptr; caller_jvms = caller_jvms->caller(), n++) {
3951 ciMethod* m = caller_jvms->method();
3952 switch (n) {
3953 case 0:
3954 fatal("current JVM state does not include the Reflection.getCallerClass frame");
3955 break;
3956 case 1:
3957 // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
3958 if (!m->caller_sensitive()) {
3959 #ifndef PRODUCT
3960 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
3961 tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n);
3962 }
3963 #endif
3964 return false; // bail-out; let JVM_GetCallerClass do the work
3965 }
3966 break;
3967 default:
3968 if (!m->is_ignored_by_security_stack_walk()) {
3969 // We have reached the desired frame; return the holder class.
3970 // Acquire method holder as java.lang.Class and push as constant.
3971 ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
3972 ciInstance* caller_mirror = caller_klass->java_mirror();
3973 set_result(makecon(TypeInstPtr::make(caller_mirror)));
3974
3975 #ifndef PRODUCT
3976 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
3977 tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
3978 tty->print_cr(" JVM state at this point:");
3979 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
3980 ciMethod* m = jvms()->of_depth(i)->method();
3981 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
3982 }
3983 }
3984 #endif
3985 return true;
3986 }
3987 break;
3988 }
3989 }
3990
3991 #ifndef PRODUCT
3992 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
3993 tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
3994 tty->print_cr(" JVM state at this point:");
3995 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
3996 ciMethod* m = jvms()->of_depth(i)->method();
3997 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
3998 }
3999 }
4000 #endif
4001
4002 return false; // bail-out; let JVM_GetCallerClass do the work
4003 }
4004
4005 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
4006 Node* arg = argument(0);
4007 Node* result = nullptr;
4008
4009 switch (id) {
4010 case vmIntrinsics::_floatToRawIntBits: result = new MoveF2INode(arg); break;
4011 case vmIntrinsics::_intBitsToFloat: result = new MoveI2FNode(arg); break;
4012 case vmIntrinsics::_doubleToRawLongBits: result = new MoveD2LNode(arg); break;
4013 case vmIntrinsics::_longBitsToDouble: result = new MoveL2DNode(arg); break;
4014
4015 case vmIntrinsics::_doubleToLongBits: {
    // Two paths (plus control) merge into a Region/Phi.
4017 RegionNode *r = new RegionNode(3);
4018 Node *phi = new PhiNode(r, TypeLong::LONG);
4019
4020 Node *cmpisnan = _gvn.transform(new CmpDNode(arg, arg));
4021 // Build the boolean node
4022 Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
4023
4024 // Branch either way.
4025 // NaN case is less traveled, which makes all the difference.
4026 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4027 Node *opt_isnan = _gvn.transform(ifisnan);
4028 assert( opt_isnan->is_If(), "Expect an IfNode");
4029 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4030 Node *iftrue = _gvn.transform(new IfTrueNode(opt_ifisnan));
4031
4032 set_control(iftrue);
4033
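    // Double.doubleToLongBits is specified to collapse every NaN input to the
    // single canonical quiet-NaN pattern below; the raw variant
    // (_doubleToRawLongBits, handled above) preserves the exact bits.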
4034 static const jlong nan_bits = CONST64(0x7ff8000000000000);
4035 Node *slow_result = longcon(nan_bits); // return NaN
4036 phi->init_req(1, _gvn.transform( slow_result ));
4037 r->init_req(1, iftrue);
4038
4039 // Else fall through
4040 Node *iffalse = _gvn.transform(new IfFalseNode(opt_ifisnan));
4041 set_control(iffalse);
4042
4043 phi->init_req(2, _gvn.transform(new MoveD2LNode(arg)));
4044 r->init_req(2, iffalse);
4045
4046 // Post merge
4047 set_control(_gvn.transform(r));
4048 record_for_igvn(r);
4049
4050 C->set_has_split_ifs(true); // Has chance for split-if optimization
4051 result = phi;
4052 assert(result->bottom_type()->isa_long(), "must be");
4053 break;
4054 }
4055
4056 case vmIntrinsics::_floatToIntBits: {
    // Two paths (plus control) merge into a Region/Phi.
4058 RegionNode *r = new RegionNode(3);
4059 Node *phi = new PhiNode(r, TypeInt::INT);
4060
4061 Node *cmpisnan = _gvn.transform(new CmpFNode(arg, arg));
4062 // Build the boolean node
4063 Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
4064
4065 // Branch either way.
4066 // NaN case is less traveled, which makes all the difference.
4067 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4068 Node *opt_isnan = _gvn.transform(ifisnan);
4069 assert( opt_isnan->is_If(), "Expect an IfNode");
4070 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4071 Node *iftrue = _gvn.transform(new IfTrueNode(opt_ifisnan));
4072
4073 set_control(iftrue);
4074
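    // Float.floatToIntBits likewise collapses every NaN input to the
    // canonical quiet-NaN pattern 0x7fc00000.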
4075 static const jint nan_bits = 0x7fc00000;
4076 Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN
4077 phi->init_req(1, _gvn.transform( slow_result ));
4078 r->init_req(1, iftrue);
4079
4080 // Else fall through
4081 Node *iffalse = _gvn.transform(new IfFalseNode(opt_ifisnan));
4082 set_control(iffalse);
4083
4084 phi->init_req(2, _gvn.transform(new MoveF2INode(arg)));
4085 r->init_req(2, iffalse);
4086
4087 // Post merge
4088 set_control(_gvn.transform(r));
4089 record_for_igvn(r);
4090
4091 C->set_has_split_ifs(true); // Has chance for split-if optimization
4092 result = phi;
4093 assert(result->bottom_type()->isa_int(), "must be");
4094 break;
4095 }
4096
4097 default:
4098 fatal_unexpected_iid(id);
4099 break;
4100 }
4101 set_result(_gvn.transform(result));
4102 return true;
4103 }
4104
4105 //----------------------inline_unsafe_copyMemory-------------------------
4106 // public native void Unsafe.copyMemory0(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
4107 bool LibraryCallKit::inline_unsafe_copyMemory() {
4108 if (callee()->is_static()) return false; // caller must have the capability!
4109 null_check_receiver(); // null-check receiver
4110 if (stopped()) return true;
4111
4112 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
4113
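  // Argument slots: slot 0 is the Unsafe receiver; each long takes two slots,
  // so srcOffset occupies slots 2-3, destBase is 4, destOffset 5-6, bytes 7-8.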
4114 Node* src_ptr = argument(1); // type: oop
4115 Node* src_off = ConvL2X(argument(2)); // type: long
4116 Node* dst_ptr = argument(4); // type: oop
4117 Node* dst_off = ConvL2X(argument(5)); // type: long
4118 Node* size = ConvL2X(argument(7)); // type: long
4119
4120 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
4121 "fieldOffset must be byte-scaled");
4122
4123 Node* src = make_unsafe_address(src_ptr, src_off);
4124 Node* dst = make_unsafe_address(dst_ptr, dst_off);
4125
4126 // Conservatively insert a memory barrier on all memory slices.
4127 // Do not let writes of the copy source or destination float below the copy.
4128 insert_mem_bar(Op_MemBarCPUOrder);
4129
4130 Node* thread = _gvn.transform(new ThreadLocalNode());
4131 Node* doing_unsafe_access_addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::doing_unsafe_access_offset()));
4132 BasicType doing_unsafe_access_bt = T_BYTE;
4133 assert((sizeof(bool) * CHAR_BIT) == 8, "not implemented");
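  // Setting this thread-local flag around the copy lets the VM's fault
  // handler attribute a crash inside the copy stub (e.g. from a truncated
  // memory-mapped file) to an unsafe access and raise a Java error instead
  // of terminating the VM.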
4134
4135 // update volatile field
4136 store_to_memory(control(), doing_unsafe_access_addr, intcon(1), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
4137
4138 // Call it. Note that the length argument is not scaled.
4139 make_runtime_call(RC_LEAF|RC_NO_FP,
4140 OptoRuntime::fast_arraycopy_Type(),
4141 StubRoutines::unsafe_arraycopy(),
4142 "unsafe_arraycopy",
4143 TypeRawPtr::BOTTOM,
4144 src, dst, size XTOP);
4145
4146 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
4147
4148 // Do not let reads of the copy destination float above the copy.
4149 insert_mem_bar(Op_MemBarCPUOrder);
4150
4151 return true;
4152 }
4153
4154 #undef XTOP
4155
//------------------------copy_to_clone----------------------------------
4157 // Helper function for inline_native_clone.
4158 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
4159 assert(obj_size != nullptr, "");
4160 Node* raw_obj = alloc_obj->in(1);
4161 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4162
4163 AllocateNode* alloc = nullptr;
4164 if (ReduceBulkZeroing) {
4165 // We will be completely responsible for initializing this object -
4166 // mark Initialize node as complete.
4167 alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
    // The object was just allocated - there should be no stores yet!
4169 guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
4170 // Mark as complete_with_arraycopy so that on AllocateNode
4171 // expansion, we know this AllocateNode is initialized by an array
4172 // copy and a StoreStore barrier exists after the array copy.
4173 alloc->initialization()->set_complete_with_arraycopy();
4174 }
4175
4176 Node* size = _gvn.transform(obj_size);
4177 access_clone(obj, alloc_obj, size, is_array);
4178
4179 // Do not let reads from the cloned object float above the arraycopy.
4180 if (alloc != nullptr) {
4181 // Do not let stores that initialize this object be reordered with
4182 // a subsequent store that would make this object accessible by
4183 // other threads.
4184 // Record what AllocateNode this StoreStore protects so that
4185 // escape analysis can go from the MemBarStoreStoreNode to the
4186 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
4187 // based on the escape status of the AllocateNode.
4188 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
4189 } else {
4190 insert_mem_bar(Op_MemBarCPUOrder);
4191 }
4192 }
4193
4194 //------------------------inline_native_clone----------------------------
4195 // protected native Object java.lang.Object.clone();
4196 //
4197 // Here are the simple edge cases:
4198 // null receiver => normal trap
4199 // virtual and clone was overridden => slow path to out-of-line clone
4200 // not cloneable or finalizer => slow path to out-of-line Object.clone
4201 //
4202 // The general case has two steps, allocation and copying.
4203 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4204 //
4205 // Copying also has two cases, oop arrays and everything else.
4206 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4207 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4208 //
4209 // These steps fold up nicely if and when the cloned object's klass
4210 // can be sharply typed as an object array, a type array, or an instance.
4211 //
4212 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4213 PhiNode* result_val;
4214
4215 // Set the reexecute bit for the interpreter to reexecute
4216 // the bytecode that invokes Object.clone if deoptimization happens.
4217 { PreserveReexecuteState preexecs(this);
4218 jvms()->set_should_reexecute(true);
4219
4220 Node* obj = null_check_receiver();
4221 if (stopped()) return true;
4222
4223 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4224
4225 // If we are going to clone an instance, we need its exact type to
4226 // know the number and types of fields to convert the clone to
4227 // loads/stores. Maybe a speculative type can help us.
4228 if (!obj_type->klass_is_exact() &&
4229 obj_type->speculative_type() != nullptr &&
4230 obj_type->speculative_type()->is_instance_klass()) {
4231 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
4232 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
4233 !spec_ik->has_injected_fields()) {
4234 ciKlass* k = obj_type->klass();
4235 if (!k->is_instance_klass() ||
4236 k->as_instance_klass()->is_interface() ||
4237 k->as_instance_klass()->has_subklass()) {
4238 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
4239 }
4240 }
4241 }
4242
4243 // Conservatively insert a memory barrier on all memory slices.
4244 // Do not let writes into the original float below the clone.
4245 insert_mem_bar(Op_MemBarCPUOrder);
4246
4247 // paths into result_reg:
4248 enum {
4249 _slow_path = 1, // out-of-line call to clone method (virtual or not)
4250 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
4251 _array_path, // plain array allocation, plus arrayof_long_arraycopy
4252 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
4253 PATH_LIMIT
4254 };
4255 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4256 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4257 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
4258 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4259 record_for_igvn(result_reg);
4260
4261 Node* obj_klass = load_object_klass(obj);
4262 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
4263 if (array_ctl != nullptr) {
4264 // It's an array.
4265 PreserveJVMState pjvms(this);
4266 set_control(array_ctl);
4267 Node* obj_length = load_array_length(obj);
4268 Node* obj_size = nullptr;
4269 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true);
4270
4271 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4272 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
4273 // If it is an oop array, it requires very special treatment,
4274 // because gc barriers are required when accessing the array.
4275 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
4276 if (is_obja != nullptr) {
4277 PreserveJVMState pjvms2(this);
4278 set_control(is_obja);
4279 // Generate a direct call to the right arraycopy function(s).
4280 // Clones are always tightly coupled.
4281 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
4282 ac->set_clone_oop_array();
4283 Node* n = _gvn.transform(ac);
4284 assert(n == ac, "cannot disappear");
4285 ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
4286
4287 result_reg->init_req(_objArray_path, control());
4288 result_val->init_req(_objArray_path, alloc_obj);
4289 result_i_o ->set_req(_objArray_path, i_o());
4290 result_mem ->set_req(_objArray_path, reset_memory());
4291 }
4292 }
4293 // Otherwise, there are no barriers to worry about.
4294 // (We can dispense with card marks if we know the allocation
4295 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
4296 // causes the non-eden paths to take compensating steps to
4297 // simulate a fresh allocation, so that no further
4298 // card marks are required in compiled code to initialize
4299 // the object.)
4300
4301 if (!stopped()) {
4302 copy_to_clone(obj, alloc_obj, obj_size, true);
4303
4304 // Present the results of the copy.
4305 result_reg->init_req(_array_path, control());
4306 result_val->init_req(_array_path, alloc_obj);
4307 result_i_o ->set_req(_array_path, i_o());
4308 result_mem ->set_req(_array_path, reset_memory());
4309 }
4310 }
4311
4312 // We only go to the instance fast case code if we pass a number of guards.
4313 // The paths which do not pass are accumulated in the slow_region.
4314 RegionNode* slow_region = new RegionNode(1);
4315 record_for_igvn(slow_region);
4316 if (!stopped()) {
4317 // It's an instance (we did array above). Make the slow-path tests.
4318 // If this is a virtual call, we generate a funny guard. We grab
4319 // the vtable entry corresponding to clone() from the target object.
4320 // If the target method which we are calling happens to be the
4321 // Object clone() method, we pass the guard. We do not need this
4322 // guard for non-virtual calls; the caller is known to be the native
4323 // Object clone().
4324 if (is_virtual) {
4325 generate_virtual_guard(obj_klass, slow_region);
4326 }
4327
4328 // The object must be easily cloneable and must not have a finalizer.
4329 // Both of these conditions may be checked in a single test.
4330 // We could optimize the test further, but we don't care.
4331 generate_access_flags_guard(obj_klass,
4332 // Test both conditions:
4333 JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
4334 // Must be cloneable but not finalizer:
4335 JVM_ACC_IS_CLONEABLE_FAST,
4336 slow_region);
4337 }
4338
4339 if (!stopped()) {
4340 // It's an instance, and it passed the slow-path tests.
4341 PreserveJVMState pjvms(this);
4342 Node* obj_size = nullptr;
4343 // Need to deoptimize on exception from allocation since Object.clone intrinsic
4344 // is reexecuted if deoptimization occurs and there could be problems when merging
4345 // exception state between multiple Object.clone versions (reexecute=true vs reexecute=false).
4346 Node* alloc_obj = new_instance(obj_klass, nullptr, &obj_size, /*deoptimize_on_exception=*/true);
4347
4348 copy_to_clone(obj, alloc_obj, obj_size, false);
4349
4350 // Present the results of the slow call.
4351 result_reg->init_req(_instance_path, control());
4352 result_val->init_req(_instance_path, alloc_obj);
4353 result_i_o ->set_req(_instance_path, i_o());
4354 result_mem ->set_req(_instance_path, reset_memory());
4355 }
4356
4357 // Generate code for the slow case. We make a call to clone().
4358 set_control(_gvn.transform(slow_region));
4359 if (!stopped()) {
4360 PreserveJVMState pjvms(this);
4361 CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual, false, true);
4362 // We need to deoptimize on exception (see comment above)
4363 Node* slow_result = set_results_for_java_call(slow_call, false, /* deoptimize */ true);
4364 // this->control() comes from set_results_for_java_call
4365 result_reg->init_req(_slow_path, control());
4366 result_val->init_req(_slow_path, slow_result);
4367 result_i_o ->set_req(_slow_path, i_o());
4368 result_mem ->set_req(_slow_path, reset_memory());
4369 }
4370
4371 // Return the combined state.
4372 set_control( _gvn.transform(result_reg));
4373 set_i_o( _gvn.transform(result_i_o));
4374 set_all_memory( _gvn.transform(result_mem));
4375 } // original reexecute is set back here
4376
4377 set_result(_gvn.transform(result_val));
4378 return true;
4379 }
4380
4381 // If we have a tightly coupled allocation, the arraycopy may take care
4382 // of the array initialization. If one of the guards we insert between
4383 // the allocation and the arraycopy causes a deoptimization, an
// uninitialized array will escape the compiled method. To prevent that
4385 // we set the JVM state for uncommon traps between the allocation and
4386 // the arraycopy to the state before the allocation so, in case of
4387 // deoptimization, we'll reexecute the allocation and the
4388 // initialization.
4389 JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp) {
4390 if (alloc != nullptr) {
4391 ciMethod* trap_method = alloc->jvms()->method();
4392 int trap_bci = alloc->jvms()->bci();
4393
4394 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
4395 !C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_null_check)) {
4396 // Make sure there's no store between the allocation and the
      // arraycopy; otherwise, visible side effects could be re-executed
      // in case of deoptimization and cause incorrect execution.
4399 bool no_interfering_store = true;
4400 Node* mem = alloc->in(TypeFunc::Memory);
4401 if (mem->is_MergeMem()) {
4402 for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
4403 Node* n = mms.memory();
4404 if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
4405 assert(n->is_Store(), "what else?");
4406 no_interfering_store = false;
4407 break;
4408 }
4409 }
4410 } else {
4411 for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
4412 Node* n = mms.memory();
4413 if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
4414 assert(n->is_Store(), "what else?");
4415 no_interfering_store = false;
4416 break;
4417 }
4418 }
4419 }
4420
4421 if (no_interfering_store) {
4422 SafePointNode* sfpt = create_safepoint_with_state_before_array_allocation(alloc);
4423
4424 JVMState* saved_jvms = jvms();
4425 saved_reexecute_sp = _reexecute_sp;
4426
4427 set_jvms(sfpt->jvms());
4428 _reexecute_sp = jvms()->sp();
4429
4430 return saved_jvms;
4431 }
4432 }
4433 }
4434 return nullptr;
4435 }
4436
4437 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
4438 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
4439 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
4440 JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
4441 uint size = alloc->req();
4442 SafePointNode* sfpt = new SafePointNode(size, old_jvms);
4443 old_jvms->set_map(sfpt);
4444 for (uint i = 0; i < size; i++) {
4445 sfpt->init_req(i, alloc->in(i));
4446 }
4447 // re-push array length for deoptimization
4448 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
4449 old_jvms->set_sp(old_jvms->sp()+1);
4450 old_jvms->set_monoff(old_jvms->monoff()+1);
4451 old_jvms->set_scloff(old_jvms->scloff()+1);
4452 old_jvms->set_endoff(old_jvms->endoff()+1);
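  // The extra stack slot shifts every JVMState offset past the expression
  // stack (monitors, scalar-replaced objects, end) up by one.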
4453 old_jvms->set_should_reexecute(true);
4454
4455 sfpt->set_i_o(map()->i_o());
4456 sfpt->set_memory(map()->memory());
4457 sfpt->set_control(map()->control());
4458 return sfpt;
4459 }
4460
4461 // In case of a deoptimization, we restart execution at the
4462 // allocation, allocating a new array. We would leave an uninitialized
4463 // array in the heap that GCs wouldn't expect. Move the allocation
4464 // after the traps so we don't allocate the array if we
4465 // deoptimize. This is possible because tightly_coupled_allocation()
4466 // guarantees there's no observer of the allocated array at this point
4467 // and the control flow is simple enough.
4468 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
4469 int saved_reexecute_sp, uint new_idx) {
4470 if (saved_jvms_before_guards != nullptr && !stopped()) {
4471 replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
4472
4473 assert(alloc != nullptr, "only with a tightly coupled allocation");
4474 // restore JVM state to the state at the arraycopy
4475 saved_jvms_before_guards->map()->set_control(map()->control());
4476 assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
4477 assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
4478 // If we've improved the types of some nodes (null check) while
4479 // emitting the guards, propagate them to the current state
4480 map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
4481 set_jvms(saved_jvms_before_guards);
4482 _reexecute_sp = saved_reexecute_sp;
4483
4484 // Remove the allocation from above the guards
4485 CallProjections callprojs;
4486 alloc->extract_projections(&callprojs, true);
4487 InitializeNode* init = alloc->initialization();
4488 Node* alloc_mem = alloc->in(TypeFunc::Memory);
4489 C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
4490 C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
4491
4492 // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
4493 // the allocation (i.e. is only valid if the allocation succeeds):
4494 // 1) replace CastIINode with AllocateArrayNode's length here
4495 // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
4496 //
    // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
    // a new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy).
4499 Node* init_control = init->proj_out(TypeFunc::Control);
4500 Node* alloc_length = alloc->Ideal_length();
4501 #ifdef ASSERT
4502 Node* prev_cast = nullptr;
4503 #endif
4504 for (uint i = 0; i < init_control->outcnt(); i++) {
4505 Node* init_out = init_control->raw_out(i);
4506 if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
4507 #ifdef ASSERT
4508 if (prev_cast == nullptr) {
4509 prev_cast = init_out;
4510 } else {
4511 if (prev_cast->cmp(*init_out) == false) {
4512 prev_cast->dump();
4513 init_out->dump();
4514 assert(false, "not equal CastIINode");
4515 }
4516 }
4517 #endif
4518 C->gvn_replace_by(init_out, alloc_length);
4519 }
4520 }
4521 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
4522
4523 // move the allocation here (after the guards)
4524 _gvn.hash_delete(alloc);
4525 alloc->set_req(TypeFunc::Control, control());
4526 alloc->set_req(TypeFunc::I_O, i_o());
4527 Node *mem = reset_memory();
4528 set_all_memory(mem);
4529 alloc->set_req(TypeFunc::Memory, mem);
4530 set_control(init->proj_out_or_null(TypeFunc::Control));
4531 set_i_o(callprojs.fallthrough_ioproj);
4532
4533 // Update memory as done in GraphKit::set_output_for_allocation()
4534 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
4535 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
4536 if (ary_type->isa_aryptr() && length_type != nullptr) {
4537 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4538 }
4539 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
4540 int elemidx = C->get_alias_index(telemref);
4541 set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
4542 set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
4543
4544 Node* allocx = _gvn.transform(alloc);
4545 assert(allocx == alloc, "where has the allocation gone?");
4546 assert(dest->is_CheckCastPP(), "not an allocation result?");
4547
4548 _gvn.hash_delete(dest);
4549 dest->set_req(0, control());
4550 Node* destx = _gvn.transform(dest);
4551 assert(destx == dest, "where has the allocation result gone?");
4552
4553 array_ideal_length(alloc, ary_type, true);
4554 }
4555 }
4556
4557 // Unrelated UCTs between the array allocation and the array copy, which are considered safe by tightly_coupled_allocation(),
4558 // need to be replaced by an UCT with a state before the array allocation (including the array length). This is necessary
4559 // because we could hit one of these UCTs (which are executed before the emitted array copy guards and the actual array
4560 // allocation which is moved down in arraycopy_move_allocation_here()). When later resuming execution in the interpreter,
4561 // we would have wrongly skipped the array allocation. To prevent this, we resume execution at the array allocation in
4562 // the interpreter similar to what we are doing for the newly emitted guards for the array copy.
4563 void LibraryCallKit::replace_unrelated_uncommon_traps_with_alloc_state(AllocateArrayNode* alloc,
4564 JVMState* saved_jvms_before_guards) {
4565 if (saved_jvms_before_guards->map()->control()->is_IfProj()) {
4566 // There is at least one unrelated uncommon trap which needs to be replaced.
4567 SafePointNode* sfpt = create_safepoint_with_state_before_array_allocation(alloc);
4568
4569 JVMState* saved_jvms = jvms();
4570 const int saved_reexecute_sp = _reexecute_sp;
4571 set_jvms(sfpt->jvms());
4572 _reexecute_sp = jvms()->sp();
4573
4574 replace_unrelated_uncommon_traps_with_alloc_state(saved_jvms_before_guards);
4575
4576 // Restore state
4577 set_jvms(saved_jvms);
4578 _reexecute_sp = saved_reexecute_sp;
4579 }
4580 }
4581
4582 // Replace the unrelated uncommon traps with new uncommon trap nodes by reusing the action and reason. The new uncommon
4583 // traps will have the state of the array allocation. Let the old uncommon trap nodes die.
4584 void LibraryCallKit::replace_unrelated_uncommon_traps_with_alloc_state(JVMState* saved_jvms_before_guards) {
4585 Node* if_proj = saved_jvms_before_guards->map()->control(); // Start the search right before the newly emitted guards
4586 while (if_proj->is_IfProj()) {
4587 CallStaticJavaNode* uncommon_trap = get_uncommon_trap_from_success_proj(if_proj);
4588 if (uncommon_trap != nullptr) {
4589 create_new_uncommon_trap(uncommon_trap);
4590 }
4591 assert(if_proj->in(0)->is_If(), "must be If");
4592 if_proj = if_proj->in(0)->in(0);
4593 }
4594 assert(if_proj->is_Proj() && if_proj->in(0)->is_Initialize(),
4595 "must have reached control projection of init node");
4596 }
4597
4598 void LibraryCallKit::create_new_uncommon_trap(CallStaticJavaNode* uncommon_trap_call) {
4599 const int trap_request = uncommon_trap_call->uncommon_trap_request();
4600 assert(trap_request != 0, "no valid UCT trap request");
4601 PreserveJVMState pjvms(this);
4602 set_control(uncommon_trap_call->in(0));
4603 uncommon_trap(Deoptimization::trap_request_reason(trap_request),
4604 Deoptimization::trap_request_action(trap_request));
4605 assert(stopped(), "Should be stopped");
4606 _gvn.hash_delete(uncommon_trap_call);
4607 uncommon_trap_call->set_req(0, top()); // not used anymore, kill it
4608 }
4609
4610 //------------------------------inline_arraycopy-----------------------
4611 // public static native void java.lang.System.arraycopy(Object src, int srcPos,
4612 // Object dest, int destPos,
4613 // int length);
4614 bool LibraryCallKit::inline_arraycopy() {
4615 // Get the arguments.
4616 Node* src = argument(0); // type: oop
4617 Node* src_offset = argument(1); // type: int
4618 Node* dest = argument(2); // type: oop
4619 Node* dest_offset = argument(3); // type: int
4620 Node* length = argument(4); // type: int
4621
4622 uint new_idx = C->unique();
4623
4624 // Check for allocation before we add nodes that would confuse
4625 // tightly_coupled_allocation()
4626 AllocateArrayNode* alloc = tightly_coupled_allocation(dest);
4627
4628 int saved_reexecute_sp = -1;
4629 JVMState* saved_jvms_before_guards = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
4630 // See arraycopy_restore_alloc_state() comment
4631 // if alloc == null we don't have to worry about a tightly coupled allocation so we can emit all needed guards
4632 // if saved_jvms_before_guards is not null (then alloc is not null) then we can handle guards and a tightly coupled allocation
4633 // if saved_jvms_before_guards is null and alloc is not null, we can't emit any guards
4634 bool can_emit_guards = (alloc == nullptr || saved_jvms_before_guards != nullptr);
4635
4636 // The following tests must be performed
4637 // (1) src and dest are arrays.
4638 // (2) src and dest arrays must have elements of the same BasicType
4639 // (3) src and dest must not be null.
4640 // (4) src_offset must not be negative.
4641 // (5) dest_offset must not be negative.
4642 // (6) length must not be negative.
4643 // (7) src_offset + length must not exceed length of src.
4644 // (8) dest_offset + length must not exceed length of dest.
4645 // (9) each element of an oop array must be assignable
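  // Test (3) is performed eagerly just below; test (2) is deferred to macro
  // expansion or Ideal transformation; the remaining guards are emitted
  // further down, only when it is safe and profitable to do so.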
4646
4647 // (3) src and dest must not be null.
4648 // always do this here because we need the JVM state for uncommon traps
4649 Node* null_ctl = top();
4650 src = saved_jvms_before_guards != nullptr ? null_check_oop(src, &null_ctl, true, true) : null_check(src, T_ARRAY);
4651 assert(null_ctl->is_top(), "no null control here");
4652 dest = null_check(dest, T_ARRAY);
4653
4654 if (!can_emit_guards) {
4655 // if saved_jvms_before_guards is null and alloc is not null, we don't emit any
4656 // guards but the arraycopy node could still take advantage of a
    // tightly coupled allocation. tightly_coupled_allocation() is
4658 // called again to make sure it takes the null check above into
4659 // account: the null check is mandatory and if it caused an
4660 // uncommon trap to be emitted then the allocation can't be
4661 // considered tightly coupled in this context.
4662 alloc = tightly_coupled_allocation(dest);
4663 }
4664
4665 bool validated = false;
4666
4667 const Type* src_type = _gvn.type(src);
4668 const Type* dest_type = _gvn.type(dest);
4669 const TypeAryPtr* top_src = src_type->isa_aryptr();
4670 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
4671
4672 // Do we have the type of src?
4673 bool has_src = (top_src != nullptr && top_src->klass() != nullptr);
4674 // Do we have the type of dest?
4675 bool has_dest = (top_dest != nullptr && top_dest->klass() != nullptr);
4676 // Is the type for src from speculation?
4677 bool src_spec = false;
4678 // Is the type for dest from speculation?
4679 bool dest_spec = false;
4680
4681 if ((!has_src || !has_dest) && can_emit_guards) {
4682 // We don't have sufficient type information, let's see if
4683 // speculative types can help. We need to have types for both src
4684 // and dest so that it pays off.
4685
4686 // Do we already have or could we have type information for src
4687 bool could_have_src = has_src;
4688 // Do we already have or could we have type information for dest
4689 bool could_have_dest = has_dest;
4690
4691 ciKlass* src_k = nullptr;
4692 if (!has_src) {
4693 src_k = src_type->speculative_type_not_null();
4694 if (src_k != nullptr && src_k->is_array_klass()) {
4695 could_have_src = true;
4696 }
4697 }
4698
4699 ciKlass* dest_k = nullptr;
4700 if (!has_dest) {
4701 dest_k = dest_type->speculative_type_not_null();
4702 if (dest_k != nullptr && dest_k->is_array_klass()) {
4703 could_have_dest = true;
4704 }
4705 }
4706
4707 if (could_have_src && could_have_dest) {
4708 // This is going to pay off so emit the required guards
4709 if (!has_src) {
4710 src = maybe_cast_profiled_obj(src, src_k, true);
4711 src_type = _gvn.type(src);
4712 top_src = src_type->isa_aryptr();
4713 has_src = (top_src != nullptr && top_src->klass() != nullptr);
4714 src_spec = true;
4715 }
4716 if (!has_dest) {
4717 dest = maybe_cast_profiled_obj(dest, dest_k, true);
4718 dest_type = _gvn.type(dest);
4719 top_dest = dest_type->isa_aryptr();
4720 has_dest = (top_dest != nullptr && top_dest->klass() != nullptr);
4721 dest_spec = true;
4722 }
4723 }
4724 }
4725
4726 if (has_src && has_dest && can_emit_guards) {
4727 BasicType src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type();
4728 BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
4729 if (is_reference_type(src_elem)) src_elem = T_OBJECT;
4730 if (is_reference_type(dest_elem)) dest_elem = T_OBJECT;
4731
4732 if (src_elem == dest_elem && src_elem == T_OBJECT) {
4733 // If both arrays are object arrays then having the exact types
4734 // for both will remove the need for a subtype check at runtime
4735 // before the call and may make it possible to pick a faster copy
4736 // routine (without a subtype check on every element)
4737 // Do we have the exact type of src?
4738 bool could_have_src = src_spec;
4739 // Do we have the exact type of dest?
4740 bool could_have_dest = dest_spec;
4741 ciKlass* src_k = top_src->klass();
4742 ciKlass* dest_k = top_dest->klass();
4743 if (!src_spec) {
4744 src_k = src_type->speculative_type_not_null();
4745 if (src_k != nullptr && src_k->is_array_klass()) {
4746 could_have_src = true;
4747 }
4748 }
4749 if (!dest_spec) {
4750 dest_k = dest_type->speculative_type_not_null();
4751 if (dest_k != nullptr && dest_k->is_array_klass()) {
4752 could_have_dest = true;
4753 }
4754 }
4755 if (could_have_src && could_have_dest) {
4756 // If we can have both exact types, emit the missing guards
4757 if (could_have_src && !src_spec) {
4758 src = maybe_cast_profiled_obj(src, src_k, true);
4759 }
4760 if (could_have_dest && !dest_spec) {
4761 dest = maybe_cast_profiled_obj(dest, dest_k, true);
4762 }
4763 }
4764 }
4765 }
4766
4767 ciMethod* trap_method = method();
4768 int trap_bci = bci();
4769 if (saved_jvms_before_guards != nullptr) {
4770 trap_method = alloc->jvms()->method();
4771 trap_bci = alloc->jvms()->bci();
4772 }
4773
4774 bool negative_length_guard_generated = false;
4775
4776 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
4777 can_emit_guards &&
4778 !src->is_top() && !dest->is_top()) {
    // validate arguments: enables transformation of the ArrayCopyNode
4780 validated = true;
4781
4782 RegionNode* slow_region = new RegionNode(1);
4783 record_for_igvn(slow_region);
4784
4785 // (1) src and dest are arrays.
4786 generate_non_array_guard(load_object_klass(src), slow_region);
4787 generate_non_array_guard(load_object_klass(dest), slow_region);
4788
4789 // (2) src and dest arrays must have elements of the same BasicType
4790 // done at macro expansion or at Ideal transformation time
4791
4792 // (4) src_offset must not be negative.
4793 generate_negative_guard(src_offset, slow_region);
4794
4795 // (5) dest_offset must not be negative.
4796 generate_negative_guard(dest_offset, slow_region);
4797
4798 // (7) src_offset + length must not exceed length of src.
4799 generate_limit_guard(src_offset, length,
4800 load_array_length(src),
4801 slow_region);
4802
4803 // (8) dest_offset + length must not exceed length of dest.
4804 generate_limit_guard(dest_offset, length,
4805 load_array_length(dest),
4806 slow_region);
4807
4808 // (6) length must not be negative.
4809 // This is also checked in generate_arraycopy() during macro expansion, but
4810 // we also have to check it here for the case where the ArrayCopyNode will
4811 // be eliminated by Escape Analysis.
4812 if (EliminateAllocations) {
4813 generate_negative_guard(length, slow_region);
4814 negative_length_guard_generated = true;
4815 }
4816
4817 // (9) each element of an oop array must be assignable
4818 Node* dest_klass = load_object_klass(dest);
4819 if (src != dest) {
4820 Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
4821
4822 if (not_subtype_ctrl != top()) {
4823 PreserveJVMState pjvms(this);
4824 set_control(not_subtype_ctrl);
4825 uncommon_trap(Deoptimization::Reason_intrinsic,
4826 Deoptimization::Action_make_not_entrant);
4827 assert(stopped(), "Should be stopped");
4828 }
4829 }
4830 {
4831 PreserveJVMState pjvms(this);
4832 set_control(_gvn.transform(slow_region));
4833 uncommon_trap(Deoptimization::Reason_intrinsic,
4834 Deoptimization::Action_make_not_entrant);
4835 assert(stopped(), "Should be stopped");
4836 }
4837
4838 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
4839 const Type *toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
4840 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
4841 arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
4842 }
4843
4844 if (stopped()) {
4845 return true;
4846 }
4847
4848 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
4849 // Create LoadRange and LoadKlass nodes for use during macro expansion here
4850 // so the compiler has a chance to eliminate them: during macro expansion,
4851 // we have to set their control (CastPP nodes are eliminated).
4852 load_object_klass(src), load_object_klass(dest),
4853 load_array_length(src), load_array_length(dest));
4854
4855 ac->set_arraycopy(validated);
4856
4857 Node* n = _gvn.transform(ac);
4858 if (n == ac) {
4859 ac->connect_outputs(this);
4860 } else {
4861 assert(validated, "shouldn't transform if all arguments not validated");
4862 set_all_memory(n);
4863 }
4864 clear_upper_avx();
4865
4867 return true;
4868 }
4869
4871 // Helper function which determines if an arraycopy immediately follows
4872 // an allocation, with no intervening tests or other escapes for the object.
4873 AllocateArrayNode*
4874 LibraryCallKit::tightly_coupled_allocation(Node* ptr) {
4875 if (stopped()) return nullptr; // no fast path
4876 if (C->AliasLevel() == 0) return nullptr; // no MergeMems around
4877
4878 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
4879 if (alloc == nullptr) return nullptr;
4880
4881 Node* rawmem = memory(Compile::AliasIdxRaw);
4882 // Is the allocation's memory state untouched?
4883 if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
4884 // Bail out if there have been raw-memory effects since the allocation.
4885 // (Example: There might have been a call or safepoint.)
4886 return nullptr;
4887 }
4888 rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
4889 if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
4890 return nullptr;
4891 }
4892
4893 // There must be no unexpected observers of this allocation.
4894 for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
4895 Node* obs = ptr->fast_out(i);
4896 if (obs != this->map()) {
4897 return nullptr;
4898 }
4899 }
4900
4901 // This arraycopy must unconditionally follow the allocation of the ptr.
4902 Node* alloc_ctl = ptr->in(0);
4903 Node* ctl = control();
4904 while (ctl != alloc_ctl) {
4905 // There may be guards which feed into the slow_region.
4906 // Any other control flow means that we might not get a chance
4907 // to finish initializing the allocated object.
4908 // Various low-level checks bottom out in uncommon traps. These
4909 // are considered safe since we've already checked above that
4910 // there is no unexpected observer of this allocation.
4911 if (get_uncommon_trap_from_success_proj(ctl) != nullptr) {
4912 assert(ctl->in(0)->is_If(), "must be If");
4913 ctl = ctl->in(0)->in(0);
4914 } else {
4915 return nullptr;
4916 }
4917 }
4918
4919 // If we get this far, we have an allocation which immediately
4920 // precedes the arraycopy, and we can take over zeroing the new object.
4921 // The arraycopy will finish the initialization, and provide
4922 // a new control state to which we will anchor the destination pointer.
4923
4924 return alloc;
4925 }
4926
4927 CallStaticJavaNode* LibraryCallKit::get_uncommon_trap_from_success_proj(Node* node) {
4928 if (node->is_IfProj()) {
4929 Node* other_proj = node->as_IfProj()->other_if_proj();
4930 for (DUIterator_Fast jmax, j = other_proj->fast_outs(jmax); j < jmax; j++) {
4931 Node* obs = other_proj->fast_out(j);
4932 if (obs->in(0) == other_proj && obs->is_CallStaticJava() &&
4933 (obs->as_CallStaticJava()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point())) {
4934 return obs->as_CallStaticJava();
4935 }
4936 }
4937 }
4938 return nullptr;
4939 }
4940
4941 //-------------inline_encodeISOArray-----------------------------------
4942 // encode char[] to byte[] in ISO_8859_1 or ASCII
4943 bool LibraryCallKit::inline_encodeISOArray(bool ascii) {
4944 assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
4945 // no receiver since it is static method
4946 Node *src = argument(0);
4947 Node *src_offset = argument(1);
4948 Node *dst = argument(2);
4949 Node *dst_offset = argument(3);
4950 Node *length = argument(4);
4951
4952 src = must_be_not_null(src, true);
4953 dst = must_be_not_null(dst, true);
4954
4955 const Type* src_type = src->Value(&_gvn);
4956 const Type* dst_type = dst->Value(&_gvn);
4957 const TypeAryPtr* top_src = src_type->isa_aryptr();
4958 const TypeAryPtr* top_dest = dst_type->isa_aryptr();
  if (top_src == nullptr || top_src->klass() == nullptr ||
      top_dest == nullptr || top_dest->klass() == nullptr) {
4961 // failed array check
4962 return false;
4963 }
4964
4965 // Figure out the size and type of the elements we will be copying.
4966 BasicType src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type();
4967 BasicType dst_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
  if (!((src_elem == T_CHAR) || (src_elem == T_BYTE)) || dst_elem != T_BYTE) {
4969 return false;
4970 }
4971
4972 Node* src_start = array_element_address(src, src_offset, T_CHAR);
4973 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
4974 // 'src_start' points to src array + scaled offset
4975 // 'dst_start' points to dst array + scaled offset
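  // Note: 'src_start' is scaled by T_CHAR even when src is a byte[]: in that
  // case the array holds UTF-16 chars and src_offset is a char index.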
4976
4977 const TypeAryPtr* mtype = TypeAryPtr::BYTES;
4978 Node* enc = new EncodeISOArrayNode(control(), memory(mtype), src_start, dst_start, length, ascii);
4979 enc = _gvn.transform(enc);
4980 Node* res_mem = _gvn.transform(new SCMemProjNode(enc));
4981 set_memory(res_mem, mtype);
4982 set_result(enc);
4983 clear_upper_avx();
4984
4985 return true;
4986 }
4987
4988 //-------------inline_multiplyToLen-----------------------------------
4989 bool LibraryCallKit::inline_multiplyToLen() {
4990 assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");
4991
4992 address stubAddr = StubRoutines::multiplyToLen();
4993 if (stubAddr == nullptr) {
4994 return false; // Intrinsic's stub is not implemented on this platform
4995 }
4996 const char* stubName = "multiplyToLen";
4997
4998 assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");
4999
5000 // no receiver because it is a static method
5001 Node* x = argument(0);
5002 Node* xlen = argument(1);
5003 Node* y = argument(2);
5004 Node* ylen = argument(3);
5005 Node* z = argument(4);
5006
5007 x = must_be_not_null(x, true);
5008 y = must_be_not_null(y, true);
5009
5010 const Type* x_type = x->Value(&_gvn);
5011 const Type* y_type = y->Value(&_gvn);
5012 const TypeAryPtr* top_x = x_type->isa_aryptr();
5013 const TypeAryPtr* top_y = y_type->isa_aryptr();
  if (top_x == nullptr || top_x->klass() == nullptr ||
      top_y == nullptr || top_y->klass() == nullptr) {
5016 // failed array check
5017 return false;
5018 }
5019
5020 BasicType x_elem = top_x->klass()->as_array_klass()->element_type()->basic_type();
5021 BasicType y_elem = top_y->klass()->as_array_klass()->element_type()->basic_type();
5022 if (x_elem != T_INT || y_elem != T_INT) {
5023 return false;
5024 }
5025
5026 // Set the original stack and the reexecute bit for the interpreter to reexecute
5027 // the bytecode that invokes BigInteger.multiplyToLen() if deoptimization happens
5028 // on the return from z array allocation in runtime.
5029 { PreserveReexecuteState preexecs(this);
5030 jvms()->set_should_reexecute(true);
5031
5032 Node* x_start = array_element_address(x, intcon(0), x_elem);
5033 Node* y_start = array_element_address(y, intcon(0), y_elem);
    // 'x_start' points to the first element of the x array
    // 'y_start' points to the first element of the y array
5036
5037 // Allocate the result array
5038 Node* zlen = _gvn.transform(new AddINode(xlen, ylen));
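    // The product of an xlen-word magnitude and a ylen-word magnitude needs
    // at most xlen + ylen words.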
5039 ciKlass* klass = ciTypeArrayKlass::make(T_INT);
5040 Node* klass_node = makecon(TypeKlassPtr::make(klass));
5041
5042 IdealKit ideal(this);
5043
5044 #define __ ideal.
5045 Node* one = __ ConI(1);
5046 Node* zero = __ ConI(0);
5047 IdealVariable need_alloc(ideal), z_alloc(ideal); __ declarations_done();
5048 __ set(need_alloc, zero);
5049 __ set(z_alloc, z);
5050 __ if_then(z, BoolTest::eq, null()); {
5051 __ increment (need_alloc, one);
5052 } __ else_(); {
5053 // Update graphKit memory and control from IdealKit.
5054 sync_kit(ideal);
5055 Node *cast = new CastPPNode(z, TypePtr::NOTNULL);
5056 cast->init_req(0, control());
5057 _gvn.set_type(cast, cast->bottom_type());
5058 C->record_for_igvn(cast);
5059
5060 Node* zlen_arg = load_array_length(cast);
5061 // Update IdealKit memory and control from graphKit.
5062 __ sync_kit(this);
5063 __ if_then(zlen_arg, BoolTest::lt, zlen); {
5064 __ increment (need_alloc, one);
5065 } __ end_if();
5066 } __ end_if();
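    // At this point need_alloc is non-zero iff z is null or too short
    // (z.length < zlen), i.e. whenever a fresh result array is required.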
5067
5068 __ if_then(__ value(need_alloc), BoolTest::ne, zero); {
5069 // Update graphKit memory and control from IdealKit.
5070 sync_kit(ideal);
5071 Node * narr = new_array(klass_node, zlen, 1);
5072 // Update IdealKit memory and control from graphKit.
5073 __ sync_kit(this);
5074 __ set(z_alloc, narr);
5075 } __ end_if();
5076
5077 sync_kit(ideal);
5078 z = __ value(z_alloc);
5079 // Can't use TypeAryPtr::INTS which uses Bottom offset.
5080 _gvn.set_type(z, TypeOopPtr::make_from_klass(klass));
5081 // Final sync IdealKit and GraphKit.
5082 final_sync(ideal);
5083 #undef __
5084
5085 Node* z_start = array_element_address(z, intcon(0), T_INT);
5086
5087 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5088 OptoRuntime::multiplyToLen_Type(),
5089 stubAddr, stubName, TypePtr::BOTTOM,
5090 x_start, xlen, y_start, ylen, z_start, zlen);
5091 } // original reexecute is set back here
5092
5093 C->set_has_split_ifs(true); // Has chance for split-if optimization
5094 set_result(z);
5095 return true;
5096 }
5097
5098 //-------------inline_squareToLen------------------------------------
5099 bool LibraryCallKit::inline_squareToLen() {
5100 assert(UseSquareToLenIntrinsic, "not implemented on this platform");
5101
5102 address stubAddr = StubRoutines::squareToLen();
5103 if (stubAddr == nullptr) {
5104 return false; // Intrinsic's stub is not implemented on this platform
5105 }
5106 const char* stubName = "squareToLen";
5107
5108 assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");
5109
5110 Node* x = argument(0);
5111 Node* len = argument(1);
5112 Node* z = argument(2);
5113 Node* zlen = argument(3);
5114
5115 x = must_be_not_null(x, true);
5116 z = must_be_not_null(z, true);
5117
5118 const Type* x_type = x->Value(&_gvn);
5119 const Type* z_type = z->Value(&_gvn);
5120 const TypeAryPtr* top_x = x_type->isa_aryptr();
5121 const TypeAryPtr* top_z = z_type->isa_aryptr();
  if (top_x == nullptr || top_x->klass() == nullptr ||
      top_z == nullptr || top_z->klass() == nullptr) {
5124 // failed array check
5125 return false;
5126 }
5127
5128 BasicType x_elem = top_x->klass()->as_array_klass()->element_type()->basic_type();
5129 BasicType z_elem = top_z->klass()->as_array_klass()->element_type()->basic_type();
5130 if (x_elem != T_INT || z_elem != T_INT) {
5131 return false;
5132 }
5133
5135 Node* x_start = array_element_address(x, intcon(0), x_elem);
5136 Node* z_start = array_element_address(z, intcon(0), z_elem);
5137
5138 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5139 OptoRuntime::squareToLen_Type(),
5140 stubAddr, stubName, TypePtr::BOTTOM,
5141 x_start, len, z_start, zlen);
5142
5143 set_result(z);
5144 return true;
5145 }
5146
5147 //-------------inline_mulAdd------------------------------------------
5148 bool LibraryCallKit::inline_mulAdd() {
5149 assert(UseMulAddIntrinsic, "not implemented on this platform");
5150
5151 address stubAddr = StubRoutines::mulAdd();
5152 if (stubAddr == nullptr) {
5153 return false; // Intrinsic's stub is not implemented on this platform
5154 }
5155 const char* stubName = "mulAdd";
5156
5157 assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");
5158
5159 Node* out = argument(0);
5160 Node* in = argument(1);
5161 Node* offset = argument(2);
5162 Node* len = argument(3);
5163 Node* k = argument(4);
5164
5165 out = must_be_not_null(out, true);
5166
5167 const Type* out_type = out->Value(&_gvn);
5168 const Type* in_type = in->Value(&_gvn);
5169 const TypeAryPtr* top_out = out_type->isa_aryptr();
5170 const TypeAryPtr* top_in = in_type->isa_aryptr();
  if (top_out == nullptr || top_out->klass() == nullptr ||
      top_in == nullptr || top_in->klass() == nullptr) {
5173 // failed array check
5174 return false;
5175 }
5176
5177 BasicType out_elem = top_out->klass()->as_array_klass()->element_type()->basic_type();
5178 BasicType in_elem = top_in->klass()->as_array_klass()->element_type()->basic_type();
5179 if (out_elem != T_INT || in_elem != T_INT) {
5180 return false;
5181 }
5182
5183 Node* outlen = load_array_length(out);
5184 Node* new_offset = _gvn.transform(new SubINode(outlen, offset));
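  // BigInteger measures 'offset' from the end of 'out'; the stub expects an
  // index from the start, hence outlen - offset.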
5185 Node* out_start = array_element_address(out, intcon(0), out_elem);
5186 Node* in_start = array_element_address(in, intcon(0), in_elem);
5187
5188 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5189 OptoRuntime::mulAdd_Type(),
5190 stubAddr, stubName, TypePtr::BOTTOM,
                                out_start, in_start, new_offset, len, k);
5192 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5193 set_result(result);
5194 return true;
5195 }
5196
5197 //-------------inline_montgomeryMultiply-----------------------------------
5198 bool LibraryCallKit::inline_montgomeryMultiply() {
5199 address stubAddr = StubRoutines::montgomeryMultiply();
5200 if (stubAddr == nullptr) {
5201 return false; // Intrinsic's stub is not implemented on this platform
5202 }
5203
5204 assert(UseMontgomeryMultiplyIntrinsic, "not implemented on this platform");
5205 const char* stubName = "montgomery_multiply";
5206
5207 assert(callee()->signature()->size() == 7, "montgomeryMultiply has 7 parameters");
5208
5209 Node* a = argument(0);
5210 Node* b = argument(1);
5211 Node* n = argument(2);
5212 Node* len = argument(3);
5213 Node* inv = argument(4);
5214 Node* m = argument(6);
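  // 'inv' is a jlong and occupies argument slots 4-5, which is why 'm' is
  // found at slot 6.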
5215
5216 const Type* a_type = a->Value(&_gvn);
5217 const TypeAryPtr* top_a = a_type->isa_aryptr();
5218 const Type* b_type = b->Value(&_gvn);
5219 const TypeAryPtr* top_b = b_type->isa_aryptr();
  const Type* n_type = n->Value(&_gvn);
  const TypeAryPtr* top_n = n_type->isa_aryptr();
  const Type* m_type = m->Value(&_gvn);
  const TypeAryPtr* top_m = m_type->isa_aryptr();
  if (top_a == nullptr || top_a->klass() == nullptr ||
      top_b == nullptr || top_b->klass() == nullptr ||
      top_n == nullptr || top_n->klass() == nullptr ||
      top_m == nullptr || top_m->klass() == nullptr) {
5228 // failed array check
5229 return false;
5230 }
5231
5232 BasicType a_elem = top_a->klass()->as_array_klass()->element_type()->basic_type();
5233 BasicType b_elem = top_b->klass()->as_array_klass()->element_type()->basic_type();
5234 BasicType n_elem = top_n->klass()->as_array_klass()->element_type()->basic_type();
5235 BasicType m_elem = top_m->klass()->as_array_klass()->element_type()->basic_type();
5236 if (a_elem != T_INT || b_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
5237 return false;
5238 }
5239
5240 // Make the call
5241 {
5242 Node* a_start = array_element_address(a, intcon(0), a_elem);
5243 Node* b_start = array_element_address(b, intcon(0), b_elem);
5244 Node* n_start = array_element_address(n, intcon(0), n_elem);
5245 Node* m_start = array_element_address(m, intcon(0), m_elem);
5246
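    // 'inv' is a jlong, so montgomeryMultiply_Type() declares a half slot
    // after it; pass top() to fill that unused high half.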
5247 Node* call = make_runtime_call(RC_LEAF,
5248 OptoRuntime::montgomeryMultiply_Type(),
5249 stubAddr, stubName, TypePtr::BOTTOM,
5250 a_start, b_start, n_start, len, inv, top(),
5251 m_start);
5252 set_result(m);
5253 }
5254
5255 return true;
5256 }
5257
5258 bool LibraryCallKit::inline_montgomerySquare() {
5259 address stubAddr = StubRoutines::montgomerySquare();
5260 if (stubAddr == nullptr) {
5261 return false; // Intrinsic's stub is not implemented on this platform
5262 }
5263
5264 assert(UseMontgomerySquareIntrinsic, "not implemented on this platform");
5265 const char* stubName = "montgomery_square";
5266
5267 assert(callee()->signature()->size() == 6, "montgomerySquare has 6 parameters");
5268
5269 Node* a = argument(0);
5270 Node* n = argument(1);
5271 Node* len = argument(2);
5272 Node* inv = argument(3);
5273 Node* m = argument(5);
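  // As in montgomeryMultiply: 'inv' is a jlong occupying slots 3-4, so 'm'
  // is at slot 5, and top() in the call below fills inv's unused high half.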
5274
5275 const Type* a_type = a->Value(&_gvn);
5276 const TypeAryPtr* top_a = a_type->isa_aryptr();
  const Type* n_type = n->Value(&_gvn);
  const TypeAryPtr* top_n = n_type->isa_aryptr();
  const Type* m_type = m->Value(&_gvn);
  const TypeAryPtr* top_m = m_type->isa_aryptr();
  if (top_a == nullptr || top_a->klass() == nullptr ||
      top_n == nullptr || top_n->klass() == nullptr ||
      top_m == nullptr || top_m->klass() == nullptr) {
5284 // failed array check
5285 return false;
5286 }
5287
5288 BasicType a_elem = top_a->klass()->as_array_klass()->element_type()->basic_type();
5289 BasicType n_elem = top_n->klass()->as_array_klass()->element_type()->basic_type();
5290 BasicType m_elem = top_m->klass()->as_array_klass()->element_type()->basic_type();
5291 if (a_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
5292 return false;
5293 }
5294
5295 // Make the call
5296 {
5297 Node* a_start = array_element_address(a, intcon(0), a_elem);
5298 Node* n_start = array_element_address(n, intcon(0), n_elem);
5299 Node* m_start = array_element_address(m, intcon(0), m_elem);
5300
5301 Node* call = make_runtime_call(RC_LEAF,
5302 OptoRuntime::montgomerySquare_Type(),
5303 stubAddr, stubName, TypePtr::BOTTOM,
5304 a_start, n_start, len, inv, top(),
5305 m_start);
5306 set_result(m);
5307 }
5308
5309 return true;
5310 }
5311
5312 bool LibraryCallKit::inline_bigIntegerShift(bool isRightShift) {
5313 address stubAddr = nullptr;
5314 const char* stubName = nullptr;
5315
  stubAddr = isRightShift ? StubRoutines::bigIntegerRightShift() : StubRoutines::bigIntegerLeftShift();
5317 if (stubAddr == nullptr) {
5318 return false; // Intrinsic's stub is not implemented on this platform
5319 }
5320
  stubName = isRightShift ? "bigIntegerRightShiftWorker" : "bigIntegerLeftShiftWorker";
5322
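  // The Java-side workers being intrinsified (names as in java.math.BigInteger)
  // have roughly this shape:
  //   static void shiftRightImplWorker(int[] newArr, int[] oldArr, int newIdx,
  //                                    int shiftCount, int numIter)
  // plus the left-shift analogue.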
5323 assert(callee()->signature()->size() == 5, "expected 5 arguments");
5324
5325 Node* newArr = argument(0);
5326 Node* oldArr = argument(1);
5327 Node* newIdx = argument(2);
5328 Node* shiftCount = argument(3);
5329 Node* numIter = argument(4);
5330
5331 const Type* newArr_type = newArr->Value(&_gvn);
5332 const TypeAryPtr* top_newArr = newArr_type->isa_aryptr();
5333 const Type* oldArr_type = oldArr->Value(&_gvn);
5334 const TypeAryPtr* top_oldArr = oldArr_type->isa_aryptr();
  if (top_newArr == nullptr || top_newArr->klass() == nullptr || top_oldArr == nullptr
      || top_oldArr->klass() == nullptr) {
5337 return false;
5338 }
5339
5340 BasicType newArr_elem = top_newArr->klass()->as_array_klass()->element_type()->basic_type();
5341 BasicType oldArr_elem = top_oldArr->klass()->as_array_klass()->element_type()->basic_type();
5342 if (newArr_elem != T_INT || oldArr_elem != T_INT) {
5343 return false;
5344 }
5345
5346 // Make the call
5347 {
5348 Node* newArr_start = array_element_address(newArr, intcon(0), newArr_elem);
5349 Node* oldArr_start = array_element_address(oldArr, intcon(0), oldArr_elem);
5350
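    // The worker returns void and writes the shifted words into newArr, so no
    // result projection is needed here.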
5351 Node* call = make_runtime_call(RC_LEAF,
5352 OptoRuntime::bigIntegerShift_Type(),
5353 stubAddr,
5354 stubName,
5355 TypePtr::BOTTOM,
5356 newArr_start,
5357 oldArr_start,
5358 newIdx,
5359 shiftCount,
5360 numIter);
5361 }
5362
5363 return true;
5364 }
5365
5366 //-------------inline_vectorizedMismatch------------------------------
5367 bool LibraryCallKit::inline_vectorizedMismatch() {
5368 assert(UseVectorizedMismatchIntrinsic, "not implemented on this platform");
5369
  assert(callee()->signature()->size() == 8, "vectorizedMismatch has 6 parameters, two are long");
5371 Node* obja = argument(0); // Object
5372 Node* aoffset = argument(1); // long
5373 Node* objb = argument(3); // Object
5374 Node* boffset = argument(4); // long
5375 Node* length = argument(6); // int
5376 Node* scale = argument(7); // int
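  // Slots 2 and 5 are the unused high halves of the two long offsets. 'scale'
  // is the log2 element size (the Java side, jdk.internal.util.ArraysSupport,
  // passes log2ArrayIndexScale): 0 -> byte, 1 -> short, 2 -> int, 3 -> long.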
5377
5378 const TypeAryPtr* obja_t = _gvn.type(obja)->isa_aryptr();
5379 const TypeAryPtr* objb_t = _gvn.type(objb)->isa_aryptr();
  if (obja_t == nullptr || obja_t->klass() == nullptr ||
      objb_t == nullptr || objb_t->klass() == nullptr ||
5382 scale == top()) {
5383 return false; // failed input validation
5384 }
5385
5386 Node* obja_adr = make_unsafe_address(obja, aoffset);
5387 Node* objb_adr = make_unsafe_address(objb, boffset);
5388
5389 // Partial inlining handling for inputs smaller than ArrayOperationPartialInlineSize bytes in size.
5390 //
5391 // inline_limit = ArrayOperationPartialInlineSize / element_size;
5392 // if (length <= inline_limit) {
5393 // inline_path:
5394 // vmask = VectorMaskGen length
5395 // vload1 = LoadVectorMasked obja, vmask
5396 // vload2 = LoadVectorMasked objb, vmask
5397 // result1 = VectorCmpMasked vload1, vload2, vmask
5398 // } else {
5399 // call_stub_path:
5400 // result2 = call vectorizedMismatch_stub(obja, objb, length, scale)
5401 // }
5402 // exit_block:
5403 // return Phi(result1, result2);
5404 //
5405 enum { inline_path = 1, // input is small enough to process it all at once
5406 stub_path = 2, // input is too large; call into the VM
5407 PATH_LIMIT = 3
5408 };
5409
5410 Node* exit_block = new RegionNode(PATH_LIMIT);
5411 Node* result_phi = new PhiNode(exit_block, TypeInt::INT);
5412 Node* memory_phi = new PhiNode(exit_block, Type::MEMORY, TypePtr::BOTTOM);
5413
5414 Node* call_stub_path = control();
5415
5416 BasicType elem_bt = T_ILLEGAL;
5417
5418 const TypeInt* scale_t = _gvn.type(scale)->is_int();
5419 if (scale_t->is_con()) {
5420 switch (scale_t->get_con()) {
5421 case 0: elem_bt = T_BYTE; break;
5422 case 1: elem_bt = T_SHORT; break;
5423 case 2: elem_bt = T_INT; break;
5424 case 3: elem_bt = T_LONG; break;
5425
5426 default: elem_bt = T_ILLEGAL; break; // not supported
5427 }
5428 }
5429
5430 int inline_limit = 0;
5431 bool do_partial_inline = false;
5432
5433 if (elem_bt != T_ILLEGAL && ArrayOperationPartialInlineSize > 0) {
5434 inline_limit = ArrayOperationPartialInlineSize / type2aelembytes(elem_bt);
5435 do_partial_inline = inline_limit >= 16;
5436 }
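  // For example, with ArrayOperationPartialInlineSize == 64 and T_INT elements
  // (4 bytes each), inline_limit == 16, so inputs of at most 16 ints take the
  // inline path below.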
5437
5438 if (do_partial_inline) {
5439 assert(elem_bt != T_ILLEGAL, "sanity");
5440
5441 if (Matcher::match_rule_supported_vector(Op_VectorMaskGen, inline_limit, elem_bt) &&
5442 Matcher::match_rule_supported_vector(Op_LoadVectorMasked, inline_limit, elem_bt) &&
5443 Matcher::match_rule_supported_vector(Op_VectorCmpMasked, inline_limit, elem_bt)) {
5444
5445 const TypeVect* vt = TypeVect::make(elem_bt, inline_limit);
5446 Node* cmp_length = _gvn.transform(new CmpINode(length, intcon(inline_limit)));
5447 Node* bol_gt = _gvn.transform(new BoolNode(cmp_length, BoolTest::gt));
5448
5449 call_stub_path = generate_guard(bol_gt, nullptr, PROB_MIN);
5450
5451 if (!stopped()) {
5452 Node* casted_length = _gvn.transform(new CastIINode(control(), length, TypeInt::make(0, inline_limit, Type::WidenMin)));
5453
5454 const TypePtr* obja_adr_t = _gvn.type(obja_adr)->isa_ptr();
5455 const TypePtr* objb_adr_t = _gvn.type(objb_adr)->isa_ptr();
5456 Node* obja_adr_mem = memory(C->get_alias_index(obja_adr_t));
5457 Node* objb_adr_mem = memory(C->get_alias_index(objb_adr_t));
5458
5459 Node* vmask = _gvn.transform(new VectorMaskGenNode(ConvI2X(casted_length), TypeVect::VECTMASK, elem_bt));
5460 Node* vload_obja = _gvn.transform(new LoadVectorMaskedNode(control(), obja_adr_mem, obja_adr, obja_adr_t, vt, vmask));
5461 Node* vload_objb = _gvn.transform(new LoadVectorMaskedNode(control(), objb_adr_mem, objb_adr, objb_adr_t, vt, vmask));
5462 Node* result = _gvn.transform(new VectorCmpMaskedNode(vload_obja, vload_objb, vmask, TypeInt::INT));
5463
5464 exit_block->init_req(inline_path, control());
5465 memory_phi->init_req(inline_path, map()->memory());
5466 result_phi->init_req(inline_path, result);
5467
5468 C->set_max_vector_size(MAX2((uint)ArrayOperationPartialInlineSize, C->max_vector_size()));
5469 clear_upper_avx();
5470 }
5471 }
5472 }
5473
5474 if (call_stub_path != nullptr) {
5475 set_control(call_stub_path);
5476
5477 Node* call = make_runtime_call(RC_LEAF,
5478 OptoRuntime::vectorizedMismatch_Type(),
5479 StubRoutines::vectorizedMismatch(), "vectorizedMismatch", TypePtr::BOTTOM,
5480 obja_adr, objb_adr, length, scale);
5481
5482 exit_block->init_req(stub_path, control());
5483 memory_phi->init_req(stub_path, map()->memory());
5484 result_phi->init_req(stub_path, _gvn.transform(new ProjNode(call, TypeFunc::Parms)));
5485 }
5486
5487 exit_block = _gvn.transform(exit_block);
5488 memory_phi = _gvn.transform(memory_phi);
5489 result_phi = _gvn.transform(result_phi);
5490
5491 set_control(exit_block);
5492 set_all_memory(memory_phi);
5493 set_result(result_phi);
5494
5495 return true;
5496 }
5497
5498 /**
5499 * Calculate CRC32 for byte.
5500 * int java.util.zip.CRC32.update(int crc, int b)
5501 */
5502 bool LibraryCallKit::inline_updateCRC32() {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
5504 assert(callee()->signature()->size() == 2, "update has 2 parameters");
5505 // no receiver since it is static method
5506 Node* crc = argument(0); // type: int
5507 Node* b = argument(1); // type: int
5508
5509 /*
5510 * int c = ~ crc;
5511 * b = timesXtoThe32[(b ^ c) & 0xFF];
5512 * b = b ^ (c >>> 8);
5513 * crc = ~b;
5514 */
5515
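  // ~x is computed as x ^ -1, so the constant M1 serves both the initial and
  // the final complement.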
5516 Node* M1 = intcon(-1);
5517 crc = _gvn.transform(new XorINode(crc, M1));
5518 Node* result = _gvn.transform(new XorINode(crc, b));
5519 result = _gvn.transform(new AndINode(result, intcon(0xFF)));
5520
5521 Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
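  // Each table entry is a 4-byte jint, so scale the index by 4 (<< 2) to form
  // a byte offset into the table.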
5522 Node* offset = _gvn.transform(new LShiftINode(result, intcon(0x2)));
5523 Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
5524 result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
5525
5526 crc = _gvn.transform(new URShiftINode(crc, intcon(8)));
5527 result = _gvn.transform(new XorINode(crc, result));
5528 result = _gvn.transform(new XorINode(result, M1));
5529 set_result(result);
5530 return true;
5531 }
5532
5533 /**
5534 * Calculate CRC32 for byte[] array.
5535 * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len)
5536 */
5537 bool LibraryCallKit::inline_updateBytesCRC32() {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
5539 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5540 // no receiver since it is static method
5541 Node* crc = argument(0); // type: int
5542 Node* src = argument(1); // type: oop
5543 Node* offset = argument(2); // type: int
5544 Node* length = argument(3); // type: int
5545
5546 const Type* src_type = src->Value(&_gvn);
5547 const TypeAryPtr* top_src = src_type->isa_aryptr();
  if (top_src == nullptr || top_src->klass() == nullptr) {
5549 // failed array check
5550 return false;
5551 }
5552
5553 // Figure out the size and type of the elements we will be copying.
5554 BasicType src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type();
5555 if (src_elem != T_BYTE) {
5556 return false;
5557 }
5558
5559 // 'src_start' points to src array + scaled offset
5560 src = must_be_not_null(src, true);
5561 Node* src_start = array_element_address(src, offset, src_elem);
5562
5563 // We assume that range check is done by caller.
5564 // TODO: generate range check (offset+length < src.length) in debug VM.
5565
5566 // Call the stub.
5567 address stubAddr = StubRoutines::updateBytesCRC32();
5568 const char *stubName = "updateBytesCRC32";
5569
5570 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
5571 stubAddr, stubName, TypePtr::BOTTOM,
5572 crc, src_start, length);
5573 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5574 set_result(result);
5575 return true;
5576 }
5577
5578 /**
5579 * Calculate CRC32 for ByteBuffer.
5580 * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
5581 */
5582 bool LibraryCallKit::inline_updateByteBufferCRC32() {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
5584 assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
5585 // no receiver since it is static method
5586 Node* crc = argument(0); // type: int
5587 Node* src = argument(1); // type: long
5588 Node* offset = argument(3); // type: int
5589 Node* length = argument(4); // type: int
5590
5591 src = ConvL2X(src); // adjust Java long to machine word
5592 Node* base = _gvn.transform(new CastX2PNode(src));
5593 offset = ConvI2X(offset);
5594
5595 // 'src_start' points to src array + scaled offset
5596 Node* src_start = basic_plus_adr(top(), base, offset);
5597
5598 // Call the stub.
5599 address stubAddr = StubRoutines::updateBytesCRC32();
5600 const char *stubName = "updateBytesCRC32";
5601
5602 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
5603 stubAddr, stubName, TypePtr::BOTTOM,
5604 crc, src_start, length);
5605 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5606 set_result(result);
5607 return true;
5608 }
5609
5610 //------------------------------get_table_from_crc32c_class-----------------------
5611 Node * LibraryCallKit::get_table_from_crc32c_class(ciInstanceKlass *crc32c_class) {
5612 Node* table = load_field_from_object(nullptr, "byteTable", "[I", /*decorators*/ IN_HEAP, /*is_static*/ true, crc32c_class);
5613 assert (table != nullptr, "wrong version of java.util.zip.CRC32C");
5614
5615 return table;
5616 }
5617
5618 //------------------------------inline_updateBytesCRC32C-----------------------
5619 //
5620 // Calculate CRC32C for byte[] array.
5621 // int java.util.zip.CRC32C.updateBytes(int crc, byte[] buf, int off, int end)
5622 //
5623 bool LibraryCallKit::inline_updateBytesCRC32C() {
5624 assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
5625 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5626 assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
5627 // no receiver since it is a static method
5628 Node* crc = argument(0); // type: int
5629 Node* src = argument(1); // type: oop
5630 Node* offset = argument(2); // type: int
5631 Node* end = argument(3); // type: int
5632
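  // Unlike CRC32.updateBytes, this method receives an end index rather than a
  // length, so the length is computed as end - offset.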
5633 Node* length = _gvn.transform(new SubINode(end, offset));
5634
5635 const Type* src_type = src->Value(&_gvn);
5636 const TypeAryPtr* top_src = src_type->isa_aryptr();
  if (top_src == nullptr || top_src->klass() == nullptr) {
5638 // failed array check
5639 return false;
5640 }
5641
5642 // Figure out the size and type of the elements we will be copying.
5643 BasicType src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type();
5644 if (src_elem != T_BYTE) {
5645 return false;
5646 }
5647
5648 // 'src_start' points to src array + scaled offset
5649 src = must_be_not_null(src, true);
5650 Node* src_start = array_element_address(src, offset, src_elem);
5651
5652 // static final int[] byteTable in class CRC32C
5653 Node* table = get_table_from_crc32c_class(callee()->holder());
5654 table = must_be_not_null(table, true);
5655 Node* table_start = array_element_address(table, intcon(0), T_INT);
5656
5657 // We assume that range check is done by caller.
5658 // TODO: generate range check (offset+length < src.length) in debug VM.
5659
5660 // Call the stub.
5661 address stubAddr = StubRoutines::updateBytesCRC32C();
5662 const char *stubName = "updateBytesCRC32C";
5663
5664 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
5665 stubAddr, stubName, TypePtr::BOTTOM,
5666 crc, src_start, length, table_start);
5667 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5668 set_result(result);
5669 return true;
5670 }
5671
5672 //------------------------------inline_updateDirectByteBufferCRC32C-----------------------
5673 //
5674 // Calculate CRC32C for DirectByteBuffer.
5675 // int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
5676 //
5677 bool LibraryCallKit::inline_updateDirectByteBufferCRC32C() {
5678 assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
5679 assert(callee()->signature()->size() == 5, "updateDirectByteBuffer has 4 parameters and one is long");
5680 assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
5681 // no receiver since it is a static method
5682 Node* crc = argument(0); // type: int
5683 Node* src = argument(1); // type: long
5684 Node* offset = argument(3); // type: int
5685 Node* end = argument(4); // type: int
5686
5687 Node* length = _gvn.transform(new SubINode(end, offset));
5688
5689 src = ConvL2X(src); // adjust Java long to machine word
5690 Node* base = _gvn.transform(new CastX2PNode(src));
5691 offset = ConvI2X(offset);
5692
5693 // 'src_start' points to src array + scaled offset
5694 Node* src_start = basic_plus_adr(top(), base, offset);
5695
5696 // static final int[] byteTable in class CRC32C
5697 Node* table = get_table_from_crc32c_class(callee()->holder());
5698 table = must_be_not_null(table, true);
5699 Node* table_start = array_element_address(table, intcon(0), T_INT);
5700
5701 // Call the stub.
5702 address stubAddr = StubRoutines::updateBytesCRC32C();
5703 const char *stubName = "updateBytesCRC32C";
5704
5705 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
5706 stubAddr, stubName, TypePtr::BOTTOM,
5707 crc, src_start, length, table_start);
5708 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5709 set_result(result);
5710 return true;
5711 }
5712
5713 //------------------------------inline_updateBytesAdler32----------------------
5714 //
5715 // Calculate Adler32 checksum for byte[] array.
5716 // int java.util.zip.Adler32.updateBytes(int crc, byte[] buf, int off, int len)
5717 //
5718 bool LibraryCallKit::inline_updateBytesAdler32() {
  assert(UseAdler32Intrinsics, "need Adler32 intrinsic support"); // check if we actually need to check this flag or check a different one
5720 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5721 assert(callee()->holder()->is_loaded(), "Adler32 class must be loaded");
5722 // no receiver since it is static method
5723 Node* crc = argument(0); // type: int
5724 Node* src = argument(1); // type: oop
5725 Node* offset = argument(2); // type: int
5726 Node* length = argument(3); // type: int
5727
5728 const Type* src_type = src->Value(&_gvn);
5729 const TypeAryPtr* top_src = src_type->isa_aryptr();
  if (top_src == nullptr || top_src->klass() == nullptr) {
5731 // failed array check
5732 return false;
5733 }
5734
5735 // Figure out the size and type of the elements we will be copying.
5736 BasicType src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type();
5737 if (src_elem != T_BYTE) {
5738 return false;
5739 }
5740
5741 // 'src_start' points to src array + scaled offset
5742 Node* src_start = array_element_address(src, offset, src_elem);
5743
5744 // We assume that range check is done by caller.
5745 // TODO: generate range check (offset+length < src.length) in debug VM.
5746
5747 // Call the stub.
5748 address stubAddr = StubRoutines::updateBytesAdler32();
5749 const char *stubName = "updateBytesAdler32";
5750
5751 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
5752 stubAddr, stubName, TypePtr::BOTTOM,
5753 crc, src_start, length);
5754 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5755 set_result(result);
5756 return true;
5757 }
5758
5759 //------------------------------inline_updateByteBufferAdler32---------------
5760 //
5761 // Calculate Adler32 checksum for DirectByteBuffer.
5762 // int java.util.zip.Adler32.updateByteBuffer(int crc, long buf, int off, int len)
5763 //
5764 bool LibraryCallKit::inline_updateByteBufferAdler32() {
  assert(UseAdler32Intrinsics, "need Adler32 intrinsic support"); // check if we actually need to check this flag or check a different one
5766 assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
5767 assert(callee()->holder()->is_loaded(), "Adler32 class must be loaded");
5768 // no receiver since it is static method
5769 Node* crc = argument(0); // type: int
5770 Node* src = argument(1); // type: long
5771 Node* offset = argument(3); // type: int
5772 Node* length = argument(4); // type: int
5773
5774 src = ConvL2X(src); // adjust Java long to machine word
5775 Node* base = _gvn.transform(new CastX2PNode(src));
5776 offset = ConvI2X(offset);
5777
5778 // 'src_start' points to src array + scaled offset
5779 Node* src_start = basic_plus_adr(top(), base, offset);
5780
5781 // Call the stub.
5782 address stubAddr = StubRoutines::updateBytesAdler32();
5783 const char *stubName = "updateBytesAdler32";
5784
5785 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
5786 stubAddr, stubName, TypePtr::BOTTOM,
5787 crc, src_start, length);
5788
5789 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5790 set_result(result);
5791 return true;
5792 }
5793
5794 //----------------------------inline_reference_get----------------------------
5795 // public T java.lang.ref.Reference.get();
5796 bool LibraryCallKit::inline_reference_get() {
5797 const int referent_offset = java_lang_ref_Reference::referent_offset();
5798
5799 // Get the argument:
5800 Node* reference_obj = null_check_receiver();
5801 if (stopped()) return true;
5802
5803 DecoratorSet decorators = IN_HEAP | ON_WEAK_OOP_REF;
5804 Node* result = load_field_from_object(reference_obj, "referent", "Ljava/lang/Object;",
5805 decorators, /*is_static*/ false, nullptr);
5806 if (result == nullptr) return false;
5807
5808 // Add memory barrier to prevent commoning reads from this field
5809 // across safepoint since GC can change its value.
5810 insert_mem_bar(Op_MemBarCPUOrder);
5811
5812 set_result(result);
5813 return true;
5814 }
5815
5816 //----------------------------inline_reference_refersTo0----------------------------
5817 // bool java.lang.ref.Reference.refersTo0();
5818 // bool java.lang.ref.PhantomReference.refersTo0();
5819 bool LibraryCallKit::inline_reference_refersTo0(bool is_phantom) {
5820 // Get arguments:
5821 Node* reference_obj = null_check_receiver();
5822 Node* other_obj = argument(1);
5823 if (stopped()) return true;
5824
5825 DecoratorSet decorators = IN_HEAP | AS_NO_KEEPALIVE;
5826 decorators |= (is_phantom ? ON_PHANTOM_OOP_REF : ON_WEAK_OOP_REF);
5827 Node* referent = load_field_from_object(reference_obj, "referent", "Ljava/lang/Object;",
5828 decorators, /*is_static*/ false, nullptr);
5829 if (referent == nullptr) return false;
5830
5831 // Add memory barrier to prevent commoning reads from this field
5832 // across safepoint since GC can change its value.
5833 insert_mem_bar(Op_MemBarCPUOrder);
5834
5835 Node* cmp = _gvn.transform(new CmpPNode(referent, other_obj));
5836 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
5837 IfNode* if_node = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
5838
5839 RegionNode* region = new RegionNode(3);
5840 PhiNode* phi = new PhiNode(region, TypeInt::BOOL);
5841
5842 Node* if_true = _gvn.transform(new IfTrueNode(if_node));
5843 region->init_req(1, if_true);
5844 phi->init_req(1, intcon(1));
5845
5846 Node* if_false = _gvn.transform(new IfFalseNode(if_node));
5847 region->init_req(2, if_false);
5848 phi->init_req(2, intcon(0));
5849
5850 set_control(_gvn.transform(region));
5851 record_for_igvn(region);
5852 set_result(_gvn.transform(phi));
5853 return true;
5854 }
5855
5856
5857 Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString,
5858 DecoratorSet decorators = IN_HEAP, bool is_static = false,
                                             ciInstanceKlass* fromKls = nullptr) {
5860 if (fromKls == nullptr) {
5861 const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5862 assert(tinst != nullptr, "obj is null");
5863 assert(tinst->klass()->is_loaded(), "obj is not loaded");
5864 fromKls = tinst->klass()->as_instance_klass();
5865 } else {
5866 assert(is_static, "only for static field access");
5867 }
5868 ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
5869 ciSymbol::make(fieldTypeString),
5870 is_static);
5871
  assert(field != nullptr, "undefined field");
  if (field == nullptr) return nullptr;
5874
5875 if (is_static) {
5876 const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
5877 fromObj = makecon(tip);
5878 }
5879
5880 // Next code copied from Parse::do_get_xxx():
5881
5882 // Compute address and memory type.
5883 int offset = field->offset_in_bytes();
5884 bool is_vol = field->is_volatile();
5885 ciType* field_klass = field->type();
5886 assert(field_klass->is_loaded(), "should be loaded");
5887 const TypePtr* adr_type = C->alias_type(field)->adr_type();
5888 Node *adr = basic_plus_adr(fromObj, fromObj, offset);
5889 BasicType bt = field->layout_type();
5890
5891 // Build the resultant type of the load
5892 const Type *type;
5893 if (bt == T_OBJECT) {
5894 type = TypeOopPtr::make_from_klass(field_klass->as_klass());
5895 } else {
5896 type = Type::get_const_basic_type(bt);
5897 }
5898
5899 if (is_vol) {
5900 decorators |= MO_SEQ_CST;
5901 }
5902
5903 return access_load_at(fromObj, adr, adr_type, type, bt, decorators);
5904 }
5905
5906 Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
5907 bool is_exact = true, bool is_static = false,
                                                 ciInstanceKlass * fromKls = nullptr) {
5909 if (fromKls == nullptr) {
5910 const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5911 assert(tinst != nullptr, "obj is null");
5912 assert(tinst->klass()->is_loaded(), "obj is not loaded");
5913 assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
5914 fromKls = tinst->klass()->as_instance_klass();
5915 }
5916 else {
5917 assert(is_static, "only for static field access");
5918 }
5919 ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
5920 ciSymbol::make(fieldTypeString),
5921 is_static);
5922
5923 assert(field != nullptr, "undefined field");
5924 assert(!field->is_volatile(), "not defined for volatile fields");
5925
5926 if (is_static) {
5927 const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
5928 fromObj = makecon(tip);
5929 }
5930
5931 // Next code copied from Parse::do_get_xxx():
5932
5933 // Compute address and memory type.
5934 int offset = field->offset_in_bytes();
5935 Node *adr = basic_plus_adr(fromObj, fromObj, offset);
5936
5937 return adr;
5938 }
5939
5940 //------------------------------inline_aescrypt_Block-----------------------
5941 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
5942 address stubAddr = nullptr;
5943 const char *stubName;
5944 assert(UseAES, "need AES instruction support");
5945
5946 switch(id) {
5947 case vmIntrinsics::_aescrypt_encryptBlock:
5948 stubAddr = StubRoutines::aescrypt_encryptBlock();
5949 stubName = "aescrypt_encryptBlock";
5950 break;
5951 case vmIntrinsics::_aescrypt_decryptBlock:
5952 stubAddr = StubRoutines::aescrypt_decryptBlock();
5953 stubName = "aescrypt_decryptBlock";
5954 break;
5955 default:
5956 break;
5957 }
5958 if (stubAddr == nullptr) return false;
5959
5960 Node* aescrypt_object = argument(0);
5961 Node* src = argument(1);
5962 Node* src_offset = argument(2);
5963 Node* dest = argument(3);
5964 Node* dest_offset = argument(4);
5965
5966 src = must_be_not_null(src, true);
5967 dest = must_be_not_null(dest, true);
5968
5969 // (1) src and dest are arrays.
5970 const Type* src_type = src->Value(&_gvn);
5971 const Type* dest_type = dest->Value(&_gvn);
5972 const TypeAryPtr* top_src = src_type->isa_aryptr();
5973 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
  assert(top_src != nullptr && top_src->klass() != nullptr && top_dest != nullptr && top_dest->klass() != nullptr, "args are strange");
5975
5976 // for the quick and dirty code we will skip all the checks.
5977 // we are just trying to get the call to be generated.
5978 Node* src_start = src;
5979 Node* dest_start = dest;
5980 if (src_offset != nullptr || dest_offset != nullptr) {
5981 assert(src_offset != nullptr && dest_offset != nullptr, "");
5982 src_start = array_element_address(src, src_offset, T_BYTE);
5983 dest_start = array_element_address(dest, dest_offset, T_BYTE);
5984 }
5985
5986 // now need to get the start of its expanded key array
5987 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
5988 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
5989 if (k_start == nullptr) return false;
5990
5991 // Call the stub.
5992 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
5993 stubAddr, stubName, TypePtr::BOTTOM,
5994 src_start, dest_start, k_start);
5995
5996 return true;
5997 }
5998
5999 //------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
6000 bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
6001 address stubAddr = nullptr;
6002 const char *stubName = nullptr;
6003
6004 assert(UseAES, "need AES instruction support");
6005
6006 switch(id) {
6007 case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
6008 stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
6009 stubName = "cipherBlockChaining_encryptAESCrypt";
6010 break;
6011 case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
6012 stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
6013 stubName = "cipherBlockChaining_decryptAESCrypt";
6014 break;
6015 default:
6016 break;
6017 }
6018 if (stubAddr == nullptr) return false;
6019
6020 Node* cipherBlockChaining_object = argument(0);
6021 Node* src = argument(1);
6022 Node* src_offset = argument(2);
6023 Node* len = argument(3);
6024 Node* dest = argument(4);
6025 Node* dest_offset = argument(5);
6026
6027 src = must_be_not_null(src, false);
6028 dest = must_be_not_null(dest, false);
6029
6030 // (1) src and dest are arrays.
6031 const Type* src_type = src->Value(&_gvn);
6032 const Type* dest_type = dest->Value(&_gvn);
6033 const TypeAryPtr* top_src = src_type->isa_aryptr();
6034 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
  assert(top_src != nullptr && top_src->klass() != nullptr
         && top_dest != nullptr && top_dest->klass() != nullptr, "args are strange");
6037
6038 // checks are the responsibility of the caller
6039 Node* src_start = src;
6040 Node* dest_start = dest;
6041 if (src_offset != nullptr || dest_offset != nullptr) {
6042 assert(src_offset != nullptr && dest_offset != nullptr, "");
6043 src_start = array_element_address(src, src_offset, T_BYTE);
6044 dest_start = array_element_address(dest, dest_offset, T_BYTE);
6045 }
6046
6047 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
6048 // (because of the predicated logic executed earlier).
6049 // so we cast it here safely.
6050 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
6051
6052 Node* embeddedCipherObj = load_field_from_object(cipherBlockChaining_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
6053 if (embeddedCipherObj == nullptr) return false;
6054
6055 // cast it to what we know it will be at runtime
6056 const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
6057 assert(tinst != nullptr, "CBC obj is null");
6058 assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
6059 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6060 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
6061
6062 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6063 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
6064 const TypeOopPtr* xtype = aklass->as_instance_type();
6065 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
6066 aescrypt_object = _gvn.transform(aescrypt_object);
6067
6068 // we need to get the start of the aescrypt_object's expanded key array
6069 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
6070 if (k_start == nullptr) return false;
6071
6072 // similarly, get the start address of the r vector
6073 Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B");
6074 if (objRvec == nullptr) return false;
6075 Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
6076
6077 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
6078 Node* cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
6079 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
6080 stubAddr, stubName, TypePtr::BOTTOM,
6081 src_start, dest_start, k_start, r_start, len);
6082
6083 // return cipher length (int)
6084 Node* retvalue = _gvn.transform(new ProjNode(cbcCrypt, TypeFunc::Parms));
6085 set_result(retvalue);
6086 return true;
6087 }
6088
6089 //------------------------------inline_electronicCodeBook_AESCrypt-----------------------
6090 bool LibraryCallKit::inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id) {
6091 address stubAddr = nullptr;
6092 const char *stubName = nullptr;
6093
6094 assert(UseAES, "need AES instruction support");
6095
6096 switch (id) {
6097 case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
6098 stubAddr = StubRoutines::electronicCodeBook_encryptAESCrypt();
6099 stubName = "electronicCodeBook_encryptAESCrypt";
6100 break;
6101 case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
6102 stubAddr = StubRoutines::electronicCodeBook_decryptAESCrypt();
6103 stubName = "electronicCodeBook_decryptAESCrypt";
6104 break;
6105 default:
6106 break;
6107 }
6108
6109 if (stubAddr == nullptr) return false;
6110
6111 Node* electronicCodeBook_object = argument(0);
6112 Node* src = argument(1);
6113 Node* src_offset = argument(2);
6114 Node* len = argument(3);
6115 Node* dest = argument(4);
6116 Node* dest_offset = argument(5);
6117
6118 // (1) src and dest are arrays.
6119 const Type* src_type = src->Value(&_gvn);
6120 const Type* dest_type = dest->Value(&_gvn);
6121 const TypeAryPtr* top_src = src_type->isa_aryptr();
6122 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
  assert(top_src != nullptr && top_src->klass() != nullptr
         && top_dest != nullptr && top_dest->klass() != nullptr, "args are strange");
6125
6126 // checks are the responsibility of the caller
6127 Node* src_start = src;
6128 Node* dest_start = dest;
6129 if (src_offset != nullptr || dest_offset != nullptr) {
6130 assert(src_offset != nullptr && dest_offset != nullptr, "");
6131 src_start = array_element_address(src, src_offset, T_BYTE);
6132 dest_start = array_element_address(dest, dest_offset, T_BYTE);
6133 }
6134
6135 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
6136 // (because of the predicated logic executed earlier).
6137 // so we cast it here safely.
6138 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
6139
6140 Node* embeddedCipherObj = load_field_from_object(electronicCodeBook_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
6141 if (embeddedCipherObj == nullptr) return false;
6142
6143 // cast it to what we know it will be at runtime
6144 const TypeInstPtr* tinst = _gvn.type(electronicCodeBook_object)->isa_instptr();
6145 assert(tinst != nullptr, "ECB obj is null");
6146 assert(tinst->klass()->is_loaded(), "ECB obj is not loaded");
6147 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6148 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
6149
6150 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6151 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
6152 const TypeOopPtr* xtype = aklass->as_instance_type();
6153 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
6154 aescrypt_object = _gvn.transform(aescrypt_object);
6155
6156 // we need to get the start of the aescrypt_object's expanded key array
6157 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
6158 if (k_start == nullptr) return false;
6159
6160 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
6161 Node* ecbCrypt = make_runtime_call(RC_LEAF | RC_NO_FP,
6162 OptoRuntime::electronicCodeBook_aescrypt_Type(),
6163 stubAddr, stubName, TypePtr::BOTTOM,
6164 src_start, dest_start, k_start, len);
6165
6166 // return cipher length (int)
6167 Node* retvalue = _gvn.transform(new ProjNode(ecbCrypt, TypeFunc::Parms));
6168 set_result(retvalue);
6169 return true;
6170 }
6171
6172 //------------------------------inline_counterMode_AESCrypt-----------------------
6173 bool LibraryCallKit::inline_counterMode_AESCrypt(vmIntrinsics::ID id) {
6174 assert(UseAES, "need AES instruction support");
6175 if (!UseAESCTRIntrinsics) return false;
6176
6177 address stubAddr = nullptr;
6178 const char *stubName = nullptr;
6179 if (id == vmIntrinsics::_counterMode_AESCrypt) {
6180 stubAddr = StubRoutines::counterMode_AESCrypt();
6181 stubName = "counterMode_AESCrypt";
6182 }
6183 if (stubAddr == nullptr) return false;
6184
6185 Node* counterMode_object = argument(0);
6186 Node* src = argument(1);
6187 Node* src_offset = argument(2);
6188 Node* len = argument(3);
6189 Node* dest = argument(4);
6190 Node* dest_offset = argument(5);
6191
6192 // (1) src and dest are arrays.
6193 const Type* src_type = src->Value(&_gvn);
6194 const Type* dest_type = dest->Value(&_gvn);
6195 const TypeAryPtr* top_src = src_type->isa_aryptr();
6196 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
  assert(top_src != nullptr && top_src->klass() != nullptr &&
         top_dest != nullptr && top_dest->klass() != nullptr, "args are strange");
6199
6200 // checks are the responsibility of the caller
6201 Node* src_start = src;
6202 Node* dest_start = dest;
6203 if (src_offset != nullptr || dest_offset != nullptr) {
6204 assert(src_offset != nullptr && dest_offset != nullptr, "");
6205 src_start = array_element_address(src, src_offset, T_BYTE);
6206 dest_start = array_element_address(dest, dest_offset, T_BYTE);
6207 }
6208
6209 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
6210 // (because of the predicated logic executed earlier).
6211 // so we cast it here safely.
6212 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
6213 Node* embeddedCipherObj = load_field_from_object(counterMode_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
6214 if (embeddedCipherObj == nullptr) return false;
6215 // cast it to what we know it will be at runtime
6216 const TypeInstPtr* tinst = _gvn.type(counterMode_object)->isa_instptr();
6217 assert(tinst != nullptr, "CTR obj is null");
6218 assert(tinst->klass()->is_loaded(), "CTR obj is not loaded");
6219 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6220 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
6221 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6222 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
6223 const TypeOopPtr* xtype = aklass->as_instance_type();
6224 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
6225 aescrypt_object = _gvn.transform(aescrypt_object);
6226 // we need to get the start of the aescrypt_object's expanded key array
6227 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
6228 if (k_start == nullptr) return false;
6229 // similarly, get the start address of the r vector
6230 Node* obj_counter = load_field_from_object(counterMode_object, "counter", "[B");
6231 if (obj_counter == nullptr) return false;
6232 Node* cnt_start = array_element_address(obj_counter, intcon(0), T_BYTE);
6233
6234 Node* saved_encCounter = load_field_from_object(counterMode_object, "encryptedCounter", "[B");
6235 if (saved_encCounter == nullptr) return false;
6236 Node* saved_encCounter_start = array_element_address(saved_encCounter, intcon(0), T_BYTE);
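  // 'used' tracks how many bytes of the current encrypted counter block have
  // already been consumed by earlier partial-block requests.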
6237 Node* used = field_address_from_object(counterMode_object, "used", "I", /*is_exact*/ false);
6238
6239 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
6240 Node* ctrCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
6241 OptoRuntime::counterMode_aescrypt_Type(),
6242 stubAddr, stubName, TypePtr::BOTTOM,
6243 src_start, dest_start, k_start, cnt_start, len, saved_encCounter_start, used);
6244
6245 // return cipher length (int)
6246 Node* retvalue = _gvn.transform(new ProjNode(ctrCrypt, TypeFunc::Parms));
6247 set_result(retvalue);
6248 return true;
6249 }
6250
6251 //------------------------------get_key_start_from_aescrypt_object-----------------------
6252 Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) {
6253 #if defined(PPC64) || defined(S390)
6254 // MixColumns for decryption can be reduced by preprocessing MixColumns with round keys.
  // Intel's extension is based on this optimization and AESCrypt generates round keys by preprocessing MixColumns.
6256 // However, ppc64 vncipher processes MixColumns and requires the same round keys with encryption.
6257 // The ppc64 stubs of encryption and decryption use the same round keys (sessionK[0]).
6258 Node* objSessionK = load_field_from_object(aescrypt_object, "sessionK", "[[I");
  assert(objSessionK != nullptr, "wrong version of com.sun.crypto.provider.AESCrypt");
  if (objSessionK == nullptr) {
    return nullptr;
  }
6263 Node* objAESCryptKey = load_array_element(objSessionK, intcon(0), TypeAryPtr::OOPS, /* set_ctrl */ true);
6264 #else
6265 Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I");
#endif // PPC64 || S390
  assert(objAESCryptKey != nullptr, "wrong version of com.sun.crypto.provider.AESCrypt");
  if (objAESCryptKey == nullptr) return nullptr;
6269
6270 // now have the array, need to get the start address of the K array
6271 Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
6272 return k_start;
6273 }
6274
6275 //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
6276 // Return node representing slow path of predicate check.
6277 // the pseudo code we want to emulate with this predicate is:
6278 // for encryption:
6279 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
6280 // for decryption:
6281 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
6282 // note cipher==plain is more conservative than the original java code but that's OK
6283 //
6284 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
6285 // The receiver was checked for null already.
6286 Node* objCBC = argument(0);
6287
6288 Node* src = argument(1);
6289 Node* dest = argument(4);
6290
6291 // Load embeddedCipher field of CipherBlockChaining object.
6292 Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
6293
6294 // get AESCrypt klass for instanceOf check
6295 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
6296 // will have same classloader as CipherBlockChaining object
6297 const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr();
6298 assert(tinst != nullptr, "CBCobj is null");
6299 assert(tinst->klass()->is_loaded(), "CBCobj is not loaded");
6300
6301 // we want to do an instanceof comparison against the AESCrypt class
6302 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6303 if (!klass_AESCrypt->is_loaded()) {
6304 // if AESCrypt is not even loaded, we never take the intrinsic fast path
6305 Node* ctrl = control();
6306 set_control(top()); // no regular fast path
6307 return ctrl;
6308 }
6309
6310 src = must_be_not_null(src, true);
6311 dest = must_be_not_null(dest, true);
6312
6313 // Resolve oops to stable for CmpP below.
6314 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6315
6316 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
6317 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
6318 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6319
6320 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
6321
6322 // for encryption, we are done
6323 if (!decrypting)
6324 return instof_false; // even if it is null
6325
6326 // for decryption, we need to add a further check to avoid
6327 // taking the intrinsic path when cipher and plain are the same
6328 // see the original java code for why.
6329 RegionNode* region = new RegionNode(3);
6330 region->init_req(1, instof_false);
6331
6332 Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));
6333 Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq));
6334 Node* src_dest_conjoint = generate_guard(bool_src_dest, nullptr, PROB_MIN);
6335 region->init_req(2, src_dest_conjoint);
6336
6337 record_for_igvn(region);
6338 return _gvn.transform(region);
6339 }
6340
6341 //----------------------------inline_electronicCodeBook_AESCrypt_predicate----------------------------
6342 // Return node representing slow path of predicate check.
6343 // the pseudo code we want to emulate with this predicate is:
6344 // for encryption:
6345 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
6346 // for decryption:
6347 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
6348 // note cipher==plain is more conservative than the original java code but that's OK
6349 //
6350 Node* LibraryCallKit::inline_electronicCodeBook_AESCrypt_predicate(bool decrypting) {
6351 // The receiver was checked for null already.
6352 Node* objECB = argument(0);
6353
6354 // Load embeddedCipher field of ElectronicCodeBook object.
6355 Node* embeddedCipherObj = load_field_from_object(objECB, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
6356
6357 // get AESCrypt klass for instanceOf check
6358 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
6359 // will have same classloader as ElectronicCodeBook object
6360 const TypeInstPtr* tinst = _gvn.type(objECB)->isa_instptr();
6361 assert(tinst != nullptr, "ECBobj is null");
6362 assert(tinst->klass()->is_loaded(), "ECBobj is not loaded");
6363
6364 // we want to do an instanceof comparison against the AESCrypt class
6365 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6366 if (!klass_AESCrypt->is_loaded()) {
6367 // if AESCrypt is not even loaded, we never take the intrinsic fast path
6368 Node* ctrl = control();
6369 set_control(top()); // no regular fast path
6370 return ctrl;
6371 }
6372 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6373
6374 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
6375 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
6376 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6377
6378 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
6379
6380 // for encryption, we are done
6381 if (!decrypting)
6382 return instof_false; // even if it is null
6383
6384 // for decryption, we need to add a further check to avoid
6385 // taking the intrinsic path when cipher and plain are the same
6386 // see the original java code for why.
6387 RegionNode* region = new RegionNode(3);
6388 region->init_req(1, instof_false);
6389 Node* src = argument(1);
6390 Node* dest = argument(4);
6391 Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));
6392 Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq));
6393 Node* src_dest_conjoint = generate_guard(bool_src_dest, nullptr, PROB_MIN);
6394 region->init_req(2, src_dest_conjoint);
6395
6396 record_for_igvn(region);
6397 return _gvn.transform(region);
6398 }
6399
6400 //----------------------------inline_counterMode_AESCrypt_predicate----------------------------
6401 // Return node representing slow path of predicate check.
6402 // the pseudo code we want to emulate with this predicate is:
6403 // for encryption:
6404 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
6405 // for decryption:
6406 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
6407 // note cipher==plain is more conservative than the original java code but that's OK
6408 //
6409
6410 Node* LibraryCallKit::inline_counterMode_AESCrypt_predicate() {
6411 // The receiver was checked for null already.
6412 Node* objCTR = argument(0);
6413
6414 // Load embeddedCipher field of CipherBlockChaining object.
6415 Node* embeddedCipherObj = load_field_from_object(objCTR, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;");
6416
6417 // get AESCrypt klass for instanceOf check
6418 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
6419 // will have same classloader as CipherBlockChaining object
6420 const TypeInstPtr* tinst = _gvn.type(objCTR)->isa_instptr();
6421 assert(tinst != nullptr, "CTRobj is null");
6422 assert(tinst->klass()->is_loaded(), "CTRobj is not loaded");
6423
6424 // we want to do an instanceof comparison against the AESCrypt class
6425 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6426 if (!klass_AESCrypt->is_loaded()) {
6427 // if AESCrypt is not even loaded, we never take the intrinsic fast path
6428 Node* ctrl = control();
6429 set_control(top()); // no regular fast path
6430 return ctrl;
6431 }
6432
6433 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6434 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
6435 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
6436 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6437 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
6438
6439 return instof_false; // even if it is null
6440 }
6441
6442 //------------------------------inline_ghash_processBlocks
6443 bool LibraryCallKit::inline_ghash_processBlocks() {
6444 address stubAddr;
6445 const char *stubName;
6446 assert(UseGHASHIntrinsics, "need GHASH intrinsics support");
6447
6448 stubAddr = StubRoutines::ghash_processBlocks();
6449 stubName = "ghash_processBlocks";
6450
6451 Node* data = argument(0);
6452 Node* offset = argument(1);
6453 Node* len = argument(2);
6454 Node* state = argument(3);
6455 Node* subkeyH = argument(4);
6456
6457 state = must_be_not_null(state, true);
6458 subkeyH = must_be_not_null(subkeyH, true);
6459 data = must_be_not_null(data, true);
6460
6461 Node* state_start = array_element_address(state, intcon(0), T_LONG);
6462 assert(state_start, "state is null");
6463 Node* subkeyH_start = array_element_address(subkeyH, intcon(0), T_LONG);
6464 assert(subkeyH_start, "subkeyH is null");
6465 Node* data_start = array_element_address(data, offset, T_BYTE);
6466 assert(data_start, "data is null");
6467
6468 Node* ghash = make_runtime_call(RC_LEAF|RC_NO_FP,
6469 OptoRuntime::ghash_processBlocks_Type(),
6470 stubAddr, stubName, TypePtr::BOTTOM,
6471 state_start, subkeyH_start, data_start, len);
6472 return true;
6473 }
6474
6475 bool LibraryCallKit::inline_base64_encodeBlock() {
6476 address stubAddr;
6477 const char *stubName;
6478 assert(UseBASE64Intrinsics, "need Base64 intrinsics support");
6479 assert(callee()->signature()->size() == 6, "base64_encodeBlock has 6 parameters");
6480 stubAddr = StubRoutines::base64_encodeBlock();
6481 stubName = "encodeBlock";
6482
  if (stubAddr == nullptr) return false;
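  // argument(0) is the receiver, which signature()->size() does not count; the
  // six declared parameters occupy slots 1..6.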
6484 Node* base64obj = argument(0);
6485 Node* src = argument(1);
6486 Node* offset = argument(2);
6487 Node* len = argument(3);
6488 Node* dest = argument(4);
6489 Node* dp = argument(5);
6490 Node* isURL = argument(6);
6491
6492 src = must_be_not_null(src, true);
6493 dest = must_be_not_null(dest, true);
6494
6495 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
6496 assert(src_start, "source array is null");
6497 Node* dest_start = array_element_address(dest, intcon(0), T_BYTE);
6498 assert(dest_start, "destination array is null");
6499
6500 Node* base64 = make_runtime_call(RC_LEAF,
6501 OptoRuntime::base64_encodeBlock_Type(),
6502 stubAddr, stubName, TypePtr::BOTTOM,
6503 src_start, offset, len, dest_start, dp, isURL);
6504 return true;
6505 }
6506
6507 bool LibraryCallKit::inline_base64_decodeBlock() {
6508 address stubAddr;
6509 const char *stubName;
6510 assert(UseBASE64Intrinsics, "need Base64 intrinsics support");
6511 assert(callee()->signature()->size() == 7, "base64_decodeBlock has 7 parameters");
6512 stubAddr = StubRoutines::base64_decodeBlock();
6513 stubName = "decodeBlock";
6514
  if (stubAddr == nullptr) return false;
6516 Node* base64obj = argument(0);
6517 Node* src = argument(1);
6518 Node* src_offset = argument(2);
6519 Node* len = argument(3);
6520 Node* dest = argument(4);
6521 Node* dest_offset = argument(5);
6522 Node* isURL = argument(6);
6523 Node* isMIME = argument(7);
6524
6525 src = must_be_not_null(src, true);
6526 dest = must_be_not_null(dest, true);
6527
6528 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
6529 assert(src_start, "source array is null");
6530 Node* dest_start = array_element_address(dest, intcon(0), T_BYTE);
6531 assert(dest_start, "destination array is null");
6532
6533 Node* call = make_runtime_call(RC_LEAF,
6534 OptoRuntime::base64_decodeBlock_Type(),
6535 stubAddr, stubName, TypePtr::BOTTOM,
6536 src_start, src_offset, len, dest_start, dest_offset, isURL, isMIME);
6537 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6538 set_result(result);
6539 return true;
6540 }
6541
6542 //------------------------------inline_digestBase_implCompress-----------------------
6543 //
6544 // Calculate MD5 for single-block byte[] array.
6545 // void com.sun.security.provider.MD5.implCompress(byte[] buf, int ofs)
6546 //
6547 // Calculate SHA (i.e., SHA-1) for single-block byte[] array.
6548 // void com.sun.security.provider.SHA.implCompress(byte[] buf, int ofs)
6549 //
// Calculate SHA2 (i.e., SHA-224 or SHA-256) for single-block byte[] array.
6551 // void com.sun.security.provider.SHA2.implCompress(byte[] buf, int ofs)
6552 //
6553 // Calculate SHA5 (i.e., SHA-384 or SHA-512) for single-block byte[] array.
6554 // void com.sun.security.provider.SHA5.implCompress(byte[] buf, int ofs)
6555 //
6556 // Calculate SHA3 (i.e., SHA3-224 or SHA3-256 or SHA3-384 or SHA3-512) for single-block byte[] array.
6557 // void com.sun.security.provider.SHA3.implCompress(byte[] buf, int ofs)
6558 //
6559 bool LibraryCallKit::inline_digestBase_implCompress(vmIntrinsics::ID id) {
6560 assert(callee()->signature()->size() == 2, "sha_implCompress has 2 parameters");
6561
6562 Node* digestBase_obj = argument(0);
6563 Node* src = argument(1); // type oop
6564 Node* ofs = argument(2); // type int
6565
6566 const Type* src_type = src->Value(&_gvn);
6567 const TypeAryPtr* top_src = src_type->isa_aryptr();
  if (top_src == nullptr || top_src->klass() == nullptr) {
6569 // failed array check
6570 return false;
6571 }
6572 // Figure out the size and type of the elements we will be copying.
6573 BasicType src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type();
6574 if (src_elem != T_BYTE) {
6575 return false;
6576 }
6577 // 'src_start' points to src array + offset
6578 src = must_be_not_null(src, true);
6579 Node* src_start = array_element_address(src, ofs, src_elem);
6580 Node* state = nullptr;
6581 Node* digest_length = nullptr;
6582 address stubAddr;
6583 const char *stubName;
6584
6585 switch(id) {
6586 case vmIntrinsics::_md5_implCompress:
6587 assert(UseMD5Intrinsics, "need MD5 instruction support");
6588 state = get_state_from_digest_object(digestBase_obj, T_INT);
6589 stubAddr = StubRoutines::md5_implCompress();
6590 stubName = "md5_implCompress";
6591 break;
6592 case vmIntrinsics::_sha_implCompress:
6593 assert(UseSHA1Intrinsics, "need SHA1 instruction support");
6594 state = get_state_from_digest_object(digestBase_obj, T_INT);
6595 stubAddr = StubRoutines::sha1_implCompress();
6596 stubName = "sha1_implCompress";
6597 break;
6598 case vmIntrinsics::_sha2_implCompress:
6599 assert(UseSHA256Intrinsics, "need SHA256 instruction support");
6600 state = get_state_from_digest_object(digestBase_obj, T_INT);
6601 stubAddr = StubRoutines::sha256_implCompress();
6602 stubName = "sha256_implCompress";
6603 break;
6604 case vmIntrinsics::_sha5_implCompress:
6605 assert(UseSHA512Intrinsics, "need SHA512 instruction support");
6606 state = get_state_from_digest_object(digestBase_obj, T_LONG);
6607 stubAddr = StubRoutines::sha512_implCompress();
6608 stubName = "sha512_implCompress";
6609 break;
6610 case vmIntrinsics::_sha3_implCompress:
6611 assert(UseSHA3Intrinsics, "need SHA3 instruction support");
6612 state = get_state_from_digest_object(digestBase_obj, T_BYTE);
6613 stubAddr = StubRoutines::sha3_implCompress();
6614 stubName = "sha3_implCompress";
6615 digest_length = get_digest_length_from_digest_object(digestBase_obj);
6616 if (digest_length == nullptr) return false;
6617 break;
6618 default:
6619 fatal_unexpected_iid(id);
6620 return false;
6621 }
6622 if (state == nullptr) return false;
6623
6624 assert(stubAddr != nullptr, "Stub is generated");
6625 if (stubAddr == nullptr) return false;
6626
6627 // Call the stub.
6628 Node* call;
6629 if (digest_length == nullptr) {
6630 call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::digestBase_implCompress_Type(false),
6631 stubAddr, stubName, TypePtr::BOTTOM,
6632 src_start, state);
6633 } else {
6634 call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::digestBase_implCompress_Type(true),
6635 stubAddr, stubName, TypePtr::BOTTOM,
6636 src_start, state, digest_length);
6637 }
6638
6639 return true;
6640 }
6641
6642 //------------------------------inline_digestBase_implCompressMB-----------------------
6643 //
6644 // Calculate MD5/SHA/SHA2/SHA5/SHA3 for multi-block byte[] array.
6645 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
6646 //
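//
// Illustrative Java-level semantics of the multi-block operation (a sketch,
// not the actual library code): compress whole blocks starting at 'ofs' and
// return the updated offset:
//   while (ofs <= limit) { implCompress(b, ofs); ofs += blockSize; }
//   return ofs;
//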
6647 bool LibraryCallKit::inline_digestBase_implCompressMB(int predicate) {
6648 assert(UseMD5Intrinsics || UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics || UseSHA3Intrinsics,
6649 "need MD5/SHA1/SHA256/SHA512/SHA3 instruction support");
6650 assert((uint)predicate < 5, "sanity");
6651 assert(callee()->signature()->size() == 3, "digestBase_implCompressMB has 3 parameters");
6652
6653 Node* digestBase_obj = argument(0); // The receiver was checked for null already.
6654 Node* src = argument(1); // byte[] array
6655 Node* ofs = argument(2); // type int
6656 Node* limit = argument(3); // type int
6657
6658 const Type* src_type = src->Value(&_gvn);
6659 const TypeAryPtr* top_src = src_type->isa_aryptr();
6660 if (top_src == nullptr || top_src->klass() == nullptr) {
6661 // failed array check
6662 return false;
6663 }
6664 // Figure out the size and type of the elements we will be copying.
6665 BasicType src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type();
6666 if (src_elem != T_BYTE) {
6667 return false;
6668 }
6669 // 'src_start' points to src array + offset
6670 src = must_be_not_null(src, false);
6671 Node* src_start = array_element_address(src, ofs, src_elem);
6672
6673 const char* klass_digestBase_name = nullptr;
6674 const char* stub_name = nullptr;
6675 address stub_addr = nullptr;
6676 BasicType elem_type = T_INT;
6677
6678 switch (predicate) {
6679 case 0:
6680 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_md5_implCompress)) {
6681 klass_digestBase_name = "sun/security/provider/MD5";
6682 stub_name = "md5_implCompressMB";
6683 stub_addr = StubRoutines::md5_implCompressMB();
6684 }
6685 break;
6686 case 1:
6687 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha_implCompress)) {
6688 klass_digestBase_name = "sun/security/provider/SHA";
6689 stub_name = "sha1_implCompressMB";
6690 stub_addr = StubRoutines::sha1_implCompressMB();
6691 }
6692 break;
6693 case 2:
6694 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha2_implCompress)) {
6695 klass_digestBase_name = "sun/security/provider/SHA2";
6696 stub_name = "sha256_implCompressMB";
6697 stub_addr = StubRoutines::sha256_implCompressMB();
6698 }
6699 break;
6700 case 3:
6701 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha5_implCompress)) {
6702 klass_digestBase_name = "sun/security/provider/SHA5";
6703 stub_name = "sha512_implCompressMB";
6704 stub_addr = StubRoutines::sha512_implCompressMB();
6705 elem_type = T_LONG;
6706 }
6707 break;
6708 case 4:
6709 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_sha3_implCompress)) {
6710 klass_digestBase_name = "sun/security/provider/SHA3";
6711 stub_name = "sha3_implCompressMB";
6712 stub_addr = StubRoutines::sha3_implCompressMB();
6713 elem_type = T_BYTE;
6714 }
6715 break;
6716 default:
6717 fatal("unknown DigestBase intrinsic predicate: %d", predicate);
6718 }
6719 if (klass_digestBase_name != nullptr) {
6720 assert(stub_addr != nullptr, "Stub is generated");
6721 if (stub_addr == nullptr) return false;
6722
6723 // get DigestBase klass to look up the concrete digest klass
6724 const TypeInstPtr* tinst = _gvn.type(digestBase_obj)->isa_instptr();
6725 assert(tinst != nullptr, "digestBase_obj is not instance???");
6726 assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");
6727
6728 ciKlass* klass_digestBase = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_digestBase_name));
6729 assert(klass_digestBase->is_loaded(), "predicate checks that this class is loaded");
6730 ciInstanceKlass* instklass_digestBase = klass_digestBase->as_instance_klass();
6731 return inline_digestBase_implCompressMB(digestBase_obj, instklass_digestBase, elem_type, stub_addr, stub_name, src_start, ofs, limit);
6732 }
6733 return false;
6734 }
6735
6736 //------------------------------inline_digestBase_implCompressMB-----------------------
6737 bool LibraryCallKit::inline_digestBase_implCompressMB(Node* digestBase_obj, ciInstanceKlass* instklass_digestBase,
6738 BasicType elem_type, address stubAddr, const char *stubName,
6739 Node* src_start, Node* ofs, Node* limit) {
6740 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_digestBase);
6741 const TypeOopPtr* xtype = aklass->as_instance_type();
6742 Node* digest_obj = new CheckCastPPNode(control(), digestBase_obj, xtype);
6743 digest_obj = _gvn.transform(digest_obj);
6744
6745 Node* state = get_state_from_digest_object(digest_obj, elem_type);
6746 if (state == nullptr) return false;
6747
6748 Node* digest_length = nullptr;
6749 if (strcmp("sha3_implCompressMB", stubName) == 0) {
6750 digest_length = get_digest_length_from_digest_object(digest_obj);
6751 if (digest_length == nullptr) return false;
6752 }
6753
6754 // Call the stub.
6755 Node* call;
6756 if (digest_length == nullptr) {
6757 call = make_runtime_call(RC_LEAF|RC_NO_FP,
6758 OptoRuntime::digestBase_implCompressMB_Type(false),
6759 stubAddr, stubName, TypePtr::BOTTOM,
6760 src_start, state, ofs, limit);
6761 } else {
6762 call = make_runtime_call(RC_LEAF|RC_NO_FP,
6763 OptoRuntime::digestBase_implCompressMB_Type(true),
6764 stubAddr, stubName, TypePtr::BOTTOM,
6765 src_start, state, digest_length, ofs, limit);
6766 }
6767
6768 // return ofs (int)
6769 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6770 set_result(result);
6771
6772 return true;
6773 }
6774
6775 //------------------------------get_state_from_digest_object-----------------------
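// The signature of the 'state' field depends on the concrete digest class,
// matching the elem_type passed by the callers above:
//   MD5/SHA/SHA2 use int[]  state -> "[I"
//   SHA5         uses long[] state -> "[J"
//   SHA3         uses byte[] state -> "[B"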
6776 Node* LibraryCallKit::get_state_from_digest_object(Node* digest_object, BasicType elem_type) {
6777 const char* state_type;
6778 switch (elem_type) {
6779 case T_BYTE: state_type = "[B"; break;
6780 case T_INT: state_type = "[I"; break;
6781 case T_LONG: state_type = "[J"; break;
6782 default: ShouldNotReachHere();
6783 }
6784 Node* digest_state = load_field_from_object(digest_object, "state", state_type);
6785 assert(digest_state != nullptr, "wrong version of sun.security.provider.MD5/SHA/SHA2/SHA5/SHA3");
6786 if (digest_state == nullptr) return nullptr;
6787
6788 // now have the array, need to get the start address of the state array
6789 Node* state = array_element_address(digest_state, intcon(0), elem_type);
6790 return state;
6791 }
6792
6793 //------------------------------get_digest_length_from_digest_object----------------------------------
6794 Node* LibraryCallKit::get_digest_length_from_digest_object(Node* digest_object) {
6795 Node* digest_length = load_field_from_object(digest_object, "digestLength", "I");
6796 assert(digest_length != nullptr, "sanity");
6797 return digest_length;
6798 }
6799
6800 //----------------------------inline_digestBase_implCompressMB_predicate----------------------------
6801 // Return node representing slow path of predicate check.
6802 // The pseudo code we want to emulate with this predicate is:
6803 // if (digestBaseObj instanceof MD5/SHA/SHA2/SHA5/SHA3) do_intrinsic, else do_javapath
6804 //
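// The predicate index selects the concrete digest class probed below:
//   0 -> MD5, 1 -> SHA (SHA-1), 2 -> SHA2, 3 -> SHA5, 4 -> SHA3
//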
6805 Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) {
6806 assert(UseMD5Intrinsics || UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics || UseSHA3Intrinsics,
6807 "need MD5/SHA1/SHA256/SHA512/SHA3 instruction support");
6808 assert((uint)predicate < 5, "sanity");
6809
6810 // The receiver was checked for null already.
6811 Node* digestBaseObj = argument(0);
6812
6813 // get DigestBase klass for instanceOf check
6814 const TypeInstPtr* tinst = _gvn.type(digestBaseObj)->isa_instptr();
6815 assert(tinst != nullptr, "digestBaseObj is not instance");
6816 assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");
6817
6818 const char* klass_name = nullptr;
6819 switch (predicate) {
6820 case 0:
6821 if (UseMD5Intrinsics) {
6822 // we want to do an instanceof comparison against the MD5 class
6823 klass_name = "sun/security/provider/MD5";
6824 }
6825 break;
6826 case 1:
6827 if (UseSHA1Intrinsics) {
6828 // we want to do an instanceof comparison against the SHA class
6829 klass_name = "sun/security/provider/SHA";
6830 }
6831 break;
6832 case 2:
6833 if (UseSHA256Intrinsics) {
6834 // we want to do an instanceof comparison against the SHA2 class
6835 klass_name = "sun/security/provider/SHA2";
6836 }
6837 break;
6838 case 3:
6839 if (UseSHA512Intrinsics) {
6840 // we want to do an instanceof comparison against the SHA5 class
6841 klass_name = "sun/security/provider/SHA5";
6842 }
6843 break;
6844 case 4:
6845 if (UseSHA3Intrinsics) {
6846 // we want to do an instanceof comparison against the SHA3 class
6847 klass_name = "sun/security/provider/SHA3";
6848 }
6849 break;
6850 default:
6851 fatal("unknown SHA intrinsic predicate: %d", predicate);
6852 }
6853
6854 ciKlass* klass = nullptr;
6855 if (klass_name != nullptr) {
6856 klass = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_name));
6857 }
6858 if ((klass == nullptr) || !klass->is_loaded()) {
6859 // if none of MD5/SHA/SHA2/SHA5/SHA3 is loaded, we never take the intrinsic fast path
6860 Node* ctrl = control();
6861 set_control(top()); // no intrinsic path
6862 return ctrl;
6863 }
6864 ciInstanceKlass* instklass = klass->as_instance_klass();
6865
6866 Node* instof = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass)));
6867 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
6868 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6869 Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN);
6870
6871 return instof_false; // even if it is null
6872 }
6873
6874 //-------------inline_fma-----------------------------------
6875 bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) {
6876 Node *a = nullptr;
6877 Node *b = nullptr;
6878 Node *c = nullptr;
6879 Node* result = nullptr;
6880 switch (id) {
6881 case vmIntrinsics::_fmaD:
6882 assert(callee()->signature()->size() == 6, "fma has 3 parameters of size 2 each.");
6883 // no receiver since it is a static method
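// doubles occupy two argument slots each, hence indices 0, 2 and 4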
6884 a = round_double_node(argument(0));
6885 b = round_double_node(argument(2));
6886 c = round_double_node(argument(4));
6887 result = _gvn.transform(new FmaDNode(control(), a, b, c));
6888 break;
6889 case vmIntrinsics::_fmaF:
6890 assert(callee()->signature()->size() == 3, "fma has 3 parameters of size 1 each.");
6891 a = argument(0);
6892 b = argument(1);
6893 c = argument(2);
6894 result = _gvn.transform(new FmaFNode(control(), a, b, c));
6895 break;
6896 default:
6897 fatal_unexpected_iid(id); break;
6898 }
6899 set_result(result);
6900 return true;
6901 }
6902
6903 bool LibraryCallKit::inline_character_compare(vmIntrinsics::ID id) {
6904 // argument(0) is receiver
6905 Node* codePoint = argument(1);
6906 Node* n = nullptr;
6907
6908 switch (id) {
6909 case vmIntrinsics::_isDigit :
6910 n = new DigitNode(control(), codePoint);
6911 break;
6912 case vmIntrinsics::_isLowerCase :
6913 n = new LowerCaseNode(control(), codePoint);
6914 break;
6915 case vmIntrinsics::_isUpperCase :
6916 n = new UpperCaseNode(control(), codePoint);
6917 break;
6918 case vmIntrinsics::_isWhitespace :
6919 n = new WhitespaceNode(control(), codePoint);
6920 break;
6921 default:
6922 fatal_unexpected_iid(id);
6923 }
6924
6925 set_result(_gvn.transform(n));
6926 return true;
6927 }
6928
6929 //------------------------------inline_fp_min_max------------------------------
6930 bool LibraryCallKit::inline_fp_min_max(vmIntrinsics::ID id) {
6931 /* DISABLED BECAUSE METHOD DATA ISN'T COLLECTED PER CALL-SITE, SEE JDK-8015416.
6932
6933 // The intrinsic should be used only when the API branches aren't predictable,
6934 // the last one performing the most important comparison. The following heuristic
6935 // uses the branch statistics to eventually bail out if necessary.
6936
6937 ciMethodData *md = callee()->method_data();
6938
6939 if ( md != nullptr && md->is_mature() && md->invocation_count() > 0 ) {
6940 ciCallProfile cp = caller()->call_profile_at_bci(bci());
6941
6942 if ( ((double)cp.count()) / ((double)md->invocation_count()) < 0.8 ) {
6943 // Bail out if the call-site didn't contribute enough to the statistics.
6944 return false;
6945 }
6946
6947 uint taken = 0, not_taken = 0;
6948
6949 for (ciProfileData *p = md->first_data(); md->is_valid(p); p = md->next_data(p)) {
6950 if (p->is_BranchData()) {
6951 taken = ((ciBranchData*)p)->taken();
6952 not_taken = ((ciBranchData*)p)->not_taken();
6953 }
6954 }
6955
6956 double balance = (((double)taken) - ((double)not_taken)) / ((double)md->invocation_count());
6957 balance = balance < 0 ? -balance : balance;
6958 if ( balance > 0.2 ) {
6959 // Bail out if the most important branch is predictable enough.
6960 return false;
6961 }
6962 }
6963 */
6964
6965 Node *a = nullptr;
6966 Node *b = nullptr;
6967 Node *n = nullptr;
6968 switch (id) {
6969 case vmIntrinsics::_maxF:
6970 case vmIntrinsics::_minF:
6971 assert(callee()->signature()->size() == 2, "minF/maxF has 2 parameters of size 1 each.");
6972 a = argument(0);
6973 b = argument(1);
6974 break;
6975 case vmIntrinsics::_maxD:
6976 case vmIntrinsics::_minD:
6977 assert(callee()->signature()->size() == 4, "minD/maxD has 2 parameters of size 2 each.");
6978 a = round_double_node(argument(0));
6979 b = round_double_node(argument(2));
6980 break;
6981 default:
6982 fatal_unexpected_iid(id);
6983 break;
6984 }
6985 switch (id) {
6986 case vmIntrinsics::_maxF: n = new MaxFNode(a, b); break;
6987 case vmIntrinsics::_minF: n = new MinFNode(a, b); break;
6988 case vmIntrinsics::_maxD: n = new MaxDNode(a, b); break;
6989 case vmIntrinsics::_minD: n = new MinDNode(a, b); break;
6990 default: fatal_unexpected_iid(id); break;
6991 }
6992 set_result(_gvn.transform(n));
6993 return true;
6994 }
6995
6996 bool LibraryCallKit::inline_profileBoolean() {
6997 Node* counts = argument(1);
6998 const TypeAryPtr* ary = nullptr;
6999 ciArray* aobj = nullptr;
7000 if (counts->is_Con()
7001 && (ary = counts->bottom_type()->isa_aryptr()) != nullptr
7002 && (aobj = ary->const_oop()->as_array()) != nullptr
7003 && (aobj->length() == 2)) {
7004 // Profile is int[2] where [0] and [1] correspond to false and true value occurrences respectively.
7005 jint false_cnt = aobj->element_value(0).as_int();
7006 jint true_cnt = aobj->element_value(1).as_int();
7007
7008 if (C->log() != nullptr) {
7009 C->log()->elem("observe source='profileBoolean' false='%d' true='%d'",
7010 false_cnt, true_cnt);
7011 }
7012
7013 if (false_cnt + true_cnt == 0) {
7014 // According to profile, never executed.
7015 uncommon_trap_exact(Deoptimization::Reason_intrinsic,
7016 Deoptimization::Action_reinterpret);
7017 return true;
7018 }
7019
7020 // result is a boolean (0 or 1) and its profile (false_cnt & true_cnt)
7021 // gives the number of occurrences of each value.
7022 Node* result = argument(0);
7023 if (false_cnt == 0 || true_cnt == 0) {
7024 // According to profile, one value has never been seen.
7025 int expected_val = (false_cnt == 0) ? 1 : 0;
7026
7027 Node* cmp = _gvn.transform(new CmpINode(result, intcon(expected_val)));
7028 Node* test = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
7029
7030 IfNode* check = create_and_map_if(control(), test, PROB_ALWAYS, COUNT_UNKNOWN);
7031 Node* fast_path = _gvn.transform(new IfTrueNode(check));
7032 Node* slow_path = _gvn.transform(new IfFalseNode(check));
7033
7034 { // Slow path: uncommon trap for the never-seen value and then reexecute
7035 // MethodHandleImpl::profileBoolean() to bump the count, so JIT knows
7036 // the value has been seen at least once.
7037 PreserveJVMState pjvms(this);
7038 PreserveReexecuteState preexecs(this);
7039 jvms()->set_should_reexecute(true);
7040
7041 set_control(slow_path);
7042 set_i_o(i_o());
7043
7044 uncommon_trap_exact(Deoptimization::Reason_intrinsic,
7045 Deoptimization::Action_reinterpret);
7046 }
7047 // The guard for the never-seen value enables sharpening of the result and
7048 // returning a constant. It allows branches on the same value to be
7049 // eliminated later on.
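// For example (illustrative): with false_cnt == 0 the fast path proves
// result == 1, so a later 'if (result)' folds to its taken branch.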
7050 set_control(fast_path);
7051 result = intcon(expected_val);
7052 }
7053 // Stop profiling.
7054 // MethodHandleImpl::profileBoolean() has profiling logic in its bytecode.
7055 // By replacing method body with profile data (represented as ProfileBooleanNode
7056 // on IR level) we effectively disable profiling.
7057 // It enables full speed execution once optimized code is generated.
7058 Node* profile = _gvn.transform(new ProfileBooleanNode(result, false_cnt, true_cnt));
7059 C->record_for_igvn(profile);
7060 set_result(profile);
7061 return true;
7062 } else {
7063 // Continue profiling.
7064 // Profile data isn't available at the moment. So, execute the method's bytecode version.
7065 // Usually, when GWT LambdaForms are profiled it means that a stand-alone nmethod
7066 // is compiled and counters aren't available since the corresponding MethodHandle
7067 // isn't a compile-time constant.
7068 return false;
7069 }
7070 }
7071
7072 bool LibraryCallKit::inline_isCompileConstant() {
7073 Node* n = argument(0);
7074 set_result(n->is_Con() ? intcon(1) : intcon(0));
7075 return true;
7076 }
7077
7078 //------------------------------- inline_getObjectSize --------------------------------------
7079 //
7080 // Calculate the runtime size of the object/array.
7081 // native long sun.instrument.InstrumentationImpl.getObjectSize0(long nativeAgent, Object objectToSize);
7082 //
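// Note on argument slots: 0 = receiver, 1-2 = the long nativeAgent (two
// slots), 3 = objectToSize; hence argument(3) below.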
7083 bool LibraryCallKit::inline_getObjectSize() {
7084 Node* obj = argument(3);
7085 Node* klass_node = load_object_klass(obj);
7086
7087 jint layout_con = Klass::_lh_neutral_value;
7088 Node* layout_val = get_layout_helper(klass_node, layout_con);
7089 bool layout_is_con = (layout_val == nullptr);
7090
7091 if (layout_is_con) {
7092 // Layout helper is constant, can figure out things at compile time.
7093
7094 if (Klass::layout_helper_is_instance(layout_con)) {
7095 // Instance case: layout_con contains the size itself.
7096 Node *size = longcon(Klass::layout_helper_size_in_bytes(layout_con));
7097 set_result(size);
7098 } else {
7099 // Array case: size is round(header + element_size*arraylength).
7100 // Since arraylength is different for every array instance, we have to
7101 // compute the whole thing at runtime.
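// Worked example (illustrative, 8-byte alignment): for an int[10] with
// hsize = 16 and eshift = 2, round_mask = 7 and the expression below yields
//   size = (16 + 7 + (10 << 2)) & ~7 = 63 & ~7 = 56 bytes.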
7102
7103 Node* arr_length = load_array_length(obj);
7104
7105 int round_mask = MinObjAlignmentInBytes - 1;
7106 int hsize = Klass::layout_helper_header_size(layout_con);
7107 int eshift = Klass::layout_helper_log2_element_size(layout_con);
7108
7109 if ((round_mask & ~right_n_bits(eshift)) == 0) {
7110 round_mask = 0; // strength-reduce it if it goes away completely
7111 }
7112 assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
7113 Node* header_size = intcon(hsize + round_mask);
7114
7115 Node* lengthx = ConvI2X(arr_length);
7116 Node* headerx = ConvI2X(header_size);
7117
7118 Node* abody = lengthx;
7119 if (eshift != 0) {
7120 abody = _gvn.transform(new LShiftXNode(lengthx, intcon(eshift)));
7121 }
7122 Node* size = _gvn.transform( new AddXNode(headerx, abody) );
7123 if (round_mask != 0) {
7124 size = _gvn.transform( new AndXNode(size, MakeConX(~round_mask)) );
7125 }
7126 size = ConvX2L(size);
7127 set_result(size);
7128 }
7129 } else {
7130 // Layout helper is not constant, need to test for array-ness at runtime.
7131
7132 enum { _instance_path = 1, _array_path, PATH_LIMIT };
7133 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
7134 PhiNode* result_val = new PhiNode(result_reg, TypeLong::LONG);
7135 record_for_igvn(result_reg);
7136
7137 Node* array_ctl = generate_array_guard(klass_node, nullptr);
7138 if (array_ctl != nullptr) {
7139 // Array case: size is round(header + element_size*arraylength).
7140 // Since arraylength is different for every array instance, we have to
7141 // compute the whole thing at runtime.
7142
7143 PreserveJVMState pjvms(this);
7144 set_control(array_ctl);
7145 Node* arr_length = load_array_length(obj);
7146
7147 int round_mask = MinObjAlignmentInBytes - 1;
7148 Node* mask = intcon(round_mask);
7149
7150 Node* hss = intcon(Klass::_lh_header_size_shift);
7151 Node* hsm = intcon(Klass::_lh_header_size_mask);
7152 Node* header_size = _gvn.transform(new URShiftINode(layout_val, hss));
7153 header_size = _gvn.transform(new AndINode(header_size, hsm));
7154 header_size = _gvn.transform(new AddINode(header_size, mask));
7155
7156 // There is no need to mask or shift this value.
7157 // The semantics of LShiftINode include an implicit mask to 0x1F.
7158 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
7159 Node* elem_shift = layout_val;
7160
7161 Node* lengthx = ConvI2X(arr_length);
7162 Node* headerx = ConvI2X(header_size);
7163
7164 Node* abody = _gvn.transform(new LShiftXNode(lengthx, elem_shift));
7165 Node* size = _gvn.transform(new AddXNode(headerx, abody));
7166 if (round_mask != 0) {
7167 size = _gvn.transform(new AndXNode(size, MakeConX(~round_mask)));
7168 }
7169 size = ConvX2L(size);
7170
7171 result_reg->init_req(_array_path, control());
7172 result_val->init_req(_array_path, size);
7173 }
7174
7175 if (!stopped()) {
7176 // Instance case: the layout helper gives us instance size almost directly,
7177 // but we need to mask out the _lh_instance_slow_path_bit.
7178 Node* size = ConvI2X(layout_val);
7179 assert((int) Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
7180 Node* mask = MakeConX(~(intptr_t) right_n_bits(LogBytesPerLong));
7181 size = _gvn.transform(new AndXNode(size, mask));
7182 size = ConvX2L(size);
7183
7184 result_reg->init_req(_instance_path, control());
7185 result_val->init_req(_instance_path, size);
7186 }
7187
7188 set_result(result_reg, result_val);
7189 }
7190
7191 return true;
7192 }
7193
7194 //------------------------------- inline_blackhole --------------------------------------
7195 //
7196 // Make sure all arguments to this node are alive.
7197 // This matches methods that were requested to be blackholed through compile commands.
7198 //
7199 bool LibraryCallKit::inline_blackhole() {
7200 assert(callee()->is_static(), "Should have been checked before: only static methods here");
7201 assert(callee()->is_empty(), "Should have been checked before: only empty methods here");
7202 assert(callee()->holder()->is_loaded(), "Should have been checked before: only methods for loaded classes here");
7203
7204 // Blackhole node pinches only the control, not memory. This allows
7205 // the blackhole to be pinned in the loop that computes blackholed
7206 // values, while having no other side effects, such as breaking the
7207 // optimizations across the blackhole.
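//
// Sketch of the resulting IR shape (illustrative; the blackholed method and
// its argument are hypothetical): for a blackholed call bh(x) we emit
//   blackhole = Blackhole(ctrl, x)        // keeps x alive, no memory state
//   ctrl      = Proj(blackhole, Control)  // only control is pinned
//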
7208
7209 Node* bh = _gvn.transform(new BlackholeNode(control()));
7210 set_control(_gvn.transform(new ProjNode(bh, TypeFunc::Control)));
7211
7212 // Bind call arguments as blackhole arguments to keep them alive
7213 uint nargs = callee()->arg_size();
7214 for (uint i = 0; i < nargs; i++) {
7215 bh->add_req(argument(i));
7216 }
7217
7218 return true;
7219 }