7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "ci/ciUtilities.inline.hpp"
28 #include "classfile/vmIntrinsics.hpp"
29 #include "compiler/compileBroker.hpp"
30 #include "compiler/compileLog.hpp"
31 #include "gc/shared/barrierSet.hpp"
32 #include "jfr/support/jfrIntrinsics.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "oops/klass.inline.hpp"
35 #include "oops/objArrayKlass.hpp"
36 #include "opto/addnode.hpp"
37 #include "opto/arraycopynode.hpp"
38 #include "opto/c2compiler.hpp"
39 #include "opto/castnode.hpp"
40 #include "opto/cfgnode.hpp"
41 #include "opto/convertnode.hpp"
42 #include "opto/countbitsnode.hpp"
43 #include "opto/idealKit.hpp"
44 #include "opto/library_call.hpp"
45 #include "opto/mathexactnode.hpp"
46 #include "opto/mulnode.hpp"
307 case vmIntrinsics::_indexOfIL: return inline_string_indexOfI(StrIntrinsicNode::LL);
308 case vmIntrinsics::_indexOfIU: return inline_string_indexOfI(StrIntrinsicNode::UU);
309 case vmIntrinsics::_indexOfIUL: return inline_string_indexOfI(StrIntrinsicNode::UL);
310 case vmIntrinsics::_indexOfU_char: return inline_string_indexOfChar(StrIntrinsicNode::U);
311 case vmIntrinsics::_indexOfL_char: return inline_string_indexOfChar(StrIntrinsicNode::L);
312
313 case vmIntrinsics::_equalsL: return inline_string_equals(StrIntrinsicNode::LL);
314
315 case vmIntrinsics::_vectorizedHashCode: return inline_vectorizedHashCode();
316
317 case vmIntrinsics::_toBytesStringU: return inline_string_toBytesU();
318 case vmIntrinsics::_getCharsStringU: return inline_string_getCharsU();
319 case vmIntrinsics::_getCharStringU: return inline_string_char_access(!is_store);
320 case vmIntrinsics::_putCharStringU: return inline_string_char_access( is_store);
321
322 case vmIntrinsics::_compressStringC:
323 case vmIntrinsics::_compressStringB: return inline_string_copy( is_compress);
324 case vmIntrinsics::_inflateStringC:
325 case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);
326
327 case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false);
328 case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
329 case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false);
330 case vmIntrinsics::_getShort: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, false);
331 case vmIntrinsics::_getChar: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, false);
332 case vmIntrinsics::_getInt: return inline_unsafe_access(!is_store, T_INT, Relaxed, false);
333 case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false);
334 case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false);
335 case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);
336
337 case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false);
338 case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
339 case vmIntrinsics::_putByte: return inline_unsafe_access( is_store, T_BYTE, Relaxed, false);
340 case vmIntrinsics::_putShort: return inline_unsafe_access( is_store, T_SHORT, Relaxed, false);
341 case vmIntrinsics::_putChar: return inline_unsafe_access( is_store, T_CHAR, Relaxed, false);
342 case vmIntrinsics::_putInt: return inline_unsafe_access( is_store, T_INT, Relaxed, false);
343 case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false);
344 case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false);
345 case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false);
346
347 case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
348 case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
349 case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
350 case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
351 case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_store, T_CHAR, Volatile, false);
352 case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_store, T_INT, Volatile, false);
353 case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_store, T_LONG, Volatile, false);
354 case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_store, T_FLOAT, Volatile, false);
355 case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_store, T_DOUBLE, Volatile, false);
356
357 case vmIntrinsics::_putReferenceVolatile: return inline_unsafe_access( is_store, T_OBJECT, Volatile, false);
358 case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
359 case vmIntrinsics::_putByteVolatile: return inline_unsafe_access( is_store, T_BYTE, Volatile, false);
360 case vmIntrinsics::_putShortVolatile: return inline_unsafe_access( is_store, T_SHORT, Volatile, false);
361 case vmIntrinsics::_putCharVolatile: return inline_unsafe_access( is_store, T_CHAR, Volatile, false);
362 case vmIntrinsics::_putIntVolatile: return inline_unsafe_access( is_store, T_INT, Volatile, false);
363 case vmIntrinsics::_putLongVolatile: return inline_unsafe_access( is_store, T_LONG, Volatile, false);
364 case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access( is_store, T_FLOAT, Volatile, false);
365 case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access( is_store, T_DOUBLE, Volatile, false);
490 case vmIntrinsics::_notifyJvmtiVThreadMount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
491 "notifyJvmtiMount", false, false);
492 case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
493 "notifyJvmtiUnmount", false, false);
494 case vmIntrinsics::_notifyJvmtiVThreadHideFrames: return inline_native_notify_jvmti_hide();
495 case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
496 #endif
497
498 #ifdef JFR_HAVE_INTRINSICS
499 case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
500 case vmIntrinsics::_getEventWriter: return inline_native_getEventWriter();
501 case vmIntrinsics::_jvm_commit: return inline_native_jvm_commit();
502 #endif
503 case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
504 case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
505 case vmIntrinsics::_writeback0: return inline_unsafe_writeback0();
506 case vmIntrinsics::_writebackPreSync0: return inline_unsafe_writebackSync0(true);
507 case vmIntrinsics::_writebackPostSync0: return inline_unsafe_writebackSync0(false);
508 case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
509 case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
510 case vmIntrinsics::_getLength: return inline_native_getLength();
511 case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
512 case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
513 case vmIntrinsics::_equalsB: return inline_array_equals(StrIntrinsicNode::LL);
514 case vmIntrinsics::_equalsC: return inline_array_equals(StrIntrinsicNode::UU);
515 case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
516 case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
517 case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());
518
519 case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
520 case vmIntrinsics::_newArray: return inline_unsafe_newArray(false);
521
522 case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();
523
524 case vmIntrinsics::_isInstance:
525 case vmIntrinsics::_getModifiers:
526 case vmIntrinsics::_isInterface:
527 case vmIntrinsics::_isArray:
528 case vmIntrinsics::_isPrimitive:
529 case vmIntrinsics::_isHidden:
530 case vmIntrinsics::_getSuperclass:
531 case vmIntrinsics::_getClassAccessFlags: return inline_native_Class_query(intrinsic_id());
532
533 case vmIntrinsics::_floatToRawIntBits:
534 case vmIntrinsics::_floatToIntBits:
535 case vmIntrinsics::_intBitsToFloat:
536 case vmIntrinsics::_doubleToRawLongBits:
537 case vmIntrinsics::_doubleToLongBits:
538 case vmIntrinsics::_longBitsToDouble:
539 case vmIntrinsics::_floatToFloat16:
540 case vmIntrinsics::_float16ToFloat: return inline_fp_conversions(intrinsic_id());
2192 case vmIntrinsics::_remainderUnsigned_l: {
2193 zero_check_long(argument(2));
2194 // Division by zero detected at compile time: the graph already ends in the exception path
2195 if (stopped()) {
2196 return true; // keep the graph constructed so far
2197 }
2198 n = new UModLNode(control(), argument(0), argument(2));
2199 break;
2200 }
2201 default: fatal_unexpected_iid(id); break;
2202 }
2203 set_result(_gvn.transform(n));
2204 return true;
2205 }
2206
2207 //----------------------------inline_unsafe_access----------------------------
2208
2209 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2210 // Attempt to infer a sharper value type from the offset and base type.
2211 ciKlass* sharpened_klass = nullptr;
2212
2213 // See if it is an instance field, with an object type.
2214 if (alias_type->field() != nullptr) {
2215 if (alias_type->field()->type()->is_klass()) {
2216 sharpened_klass = alias_type->field()->type()->as_klass();
2217 }
2218 }
2219
2220 const TypeOopPtr* result = nullptr;
2221 // See if it is a narrow oop array.
2222 if (adr_type->isa_aryptr()) {
2223 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2224 const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2225 if (elem_type != nullptr && elem_type->is_loaded()) {
2226 // Sharpen the value type.
2227 result = elem_type;
2228 }
2229 }
2230 }
2231
2232 // The sharpened class might be unloaded if there is no class loader
2233 // constraint in place.
2234 if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2235 // Sharpen the value type.
2236 result = TypeOopPtr::make_from_klass(sharpened_klass);
2237 }
2238 if (result != nullptr) {
2239 #ifndef PRODUCT
2240 if (C->print_intrinsics() || C->print_inlining()) {
2241 tty->print(" from base type: "); adr_type->dump(); tty->cr();
2242 tty->print(" sharpened value: "); result->dump(); tty->cr();
2243 }
2244 #endif
2245 }
2246 return result;
2247 }
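// Illustrative example of the sharpening above (the element type is made up):
// for an Unsafe read at an in-bounds offset into a String[], the array's
// element type is loaded, so the result sharpens the value type to String
// instead of the generic Object type the caller would otherwise use.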
2248
2249 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2250 switch (kind) {
2251 case Relaxed:
2252 return MO_UNORDERED;
2253 case Opaque:
2254 return MO_RELAXED;
2255 case Acquire:
2256 return MO_ACQUIRE;
2257 case Release:
2258 return MO_RELEASE;
2259 case Volatile:
2260 return MO_SEQ_CST;
2261 default:
2262 ShouldNotReachHere();
2263 return 0;
2264 }
2265 }
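// Worked example of the mapping above, tied to the dispatch table earlier in
// this file: Unsafe.getIntVolatile arrives here via
//   inline_unsafe_access(!is_store, T_INT, Volatile, false)
// so kind == Volatile yields MO_SEQ_CST and the load is emitted with
// sequentially consistent ordering, while plain Unsafe.getInt passes Relaxed
// and yields MO_UNORDERED.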
2266
2267 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2268 if (callee()->is_static()) return false; // caller must have the capability!
2269 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2270 guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2271 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2272 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2273
2274 if (is_reference_type(type)) {
2275 decorators |= ON_UNKNOWN_OOP_REF;
2276 }
2277
2278 if (unaligned) {
2279 decorators |= C2_UNALIGNED;
2280 }
2281
2282 #ifndef PRODUCT
2283 {
2284 ResourceMark rm;
2285 // Check the signatures.
2286 ciSignature* sig = callee()->signature();
2287 #ifdef ASSERT
2288 if (!is_store) {
2289 // Object getReference(Object base, int/long offset), etc.
2290 BasicType rtype = sig->return_type()->basic_type();
2291 assert(rtype == type, "getter must return the expected value");
2292 assert(sig->count() == 2, "oop getter has 2 arguments");
2293 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2294 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2295 } else {
2296 // void putReference(Object base, int/long offset, Object x), etc.
2297 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2298 assert(sig->count() == 3, "oop putter has 3 arguments");
2299 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2300 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2301 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2302 assert(vtype == type, "putter must accept the expected value");
2303 }
2304 #endif // ASSERT
2305 }
2306 #endif //PRODUCT
2307
2308 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2309
2310 Node* receiver = argument(0); // type: oop
2311
2312 // Build address expression.
2313 Node* heap_base_oop = top();
2314
2315 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2316 Node* base = argument(1); // type: oop
2317 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2318 Node* offset = argument(2); // type: long
2319 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2320 // to be plain byte offsets, which are also the same as those accepted
2321 // by oopDesc::field_addr.
2322 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2323 "fieldOffset must be byte-scaled");
2324 // 32-bit machines ignore the high half!
2325 offset = ConvL2X(offset);
2326
2327 // Save state and restore on bailout
2328 uint old_sp = sp();
2329 SafePointNode* old_map = clone_map();
2330
2331 Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2332
2333 if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2334 if (type != T_OBJECT) {
2335 decorators |= IN_NATIVE; // off-heap primitive access
2336 } else {
2337 set_map(old_map);
2338 set_sp(old_sp);
2339 return false; // off-heap oop accesses are not supported
2340 }
2341 } else {
2342 heap_base_oop = base; // on-heap or mixed access
2343 }
2344
2345 // Can base be null? Otherwise, always on-heap access.
2346 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2347
2348 if (!can_access_non_heap) {
2349 decorators |= IN_HEAP;
2350 }
2351
2352 Node* val = is_store ? argument(4) : nullptr;
2353
2354 const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2355 if (adr_type == TypePtr::NULL_PTR) {
2356 set_map(old_map);
2357 set_sp(old_sp);
2358 return false; // off-heap access with zero address
2359 }
2360
2361 // Try to categorize the address.
2362 Compile::AliasType* alias_type = C->alias_type(adr_type);
2363 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2364
2365 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2366 alias_type->adr_type() == TypeAryPtr::RANGE) {
2367 set_map(old_map);
2368 set_sp(old_sp);
2369 return false; // not supported
2370 }
2371
2372 bool mismatched = false;
2373 BasicType bt = alias_type->basic_type();
2374 if (bt != T_ILLEGAL) {
2375 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2376 if (bt == T_BYTE && adr_type->isa_aryptr()) {
2377 // Alias type doesn't differentiate between byte[] and boolean[].
2378 // Use address type to get the element type.
2379 bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2380 }
2381 if (is_reference_type(bt, true)) {
2382 // accessing an array field with getReference is not a mismatch
2383 bt = T_OBJECT;
2384 }
2385 if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2386 // Don't intrinsify mismatched object accesses
2387 set_map(old_map);
2388 set_sp(old_sp);
2389 return false;
2390 }
2391 mismatched = (bt != type);
2392 } else if (alias_type->adr_type()->isa_oopptr()) {
2393 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2394 }
2395
2396 destruct_map_clone(old_map);
2397 assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2398
2399 if (mismatched) {
2400 decorators |= C2_MISMATCHED;
2401 }
2402
2403 // First guess at the value type.
2404 const Type *value_type = Type::get_const_basic_type(type);
2405
2406 // Figure out the memory ordering.
2407 decorators |= mo_decorator_for_access_kind(kind);
2408
2409 if (!is_store && type == T_OBJECT) {
2410 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2411 if (tjp != nullptr) {
2412 value_type = tjp;
2413 }
2414 }
2415
2416 receiver = null_check(receiver);
2417 if (stopped()) {
2418 return true;
2419 }
2420 // Heap pointers get a null-check from the interpreter,
2421 // as a courtesy. However, this is not guaranteed by Unsafe,
2422 // and it is not possible to fully distinguish unintended nulls
2423 // from intended ones in this API.
2424
2425 if (!is_store) {
2426 Node* p = nullptr;
2427 // Try to constant fold a load from a constant field
2428 ciField* field = alias_type->field();
2429 if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) {
2430 // final or stable field
2431 p = make_constant_from_field(field, heap_base_oop);
2432 }
2433
2434 if (p == nullptr) { // Could not constant fold the load
2435 p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2436 // Normalize the value returned by getBoolean in the following cases
2437 if (type == T_BOOLEAN &&
2438 (mismatched ||
2439 heap_base_oop == top() || // - heap_base_oop is null or
2440 (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2441 // and the unsafe access is made at a large offset
2442 // (i.e., larger than the maximum offset necessary for any
2443 // field access)
2444 ) {
2445 IdealKit ideal = IdealKit(this);
2446 #define __ ideal.
2447 IdealVariable normalized_result(ideal);
2448 __ declarations_done();
2449 __ set(normalized_result, p);
2450 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2451 __ set(normalized_result, ideal.ConI(1));
2452 ideal.end_if();
2453 final_sync(ideal);
2454 p = __ value(normalized_result);
2455 #undef __
2456 }
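// Illustrative note on the IdealKit graph above (the byte value is made up):
// a mismatched getBoolean over a byte[] may observe a byte such as 0x02, yet
// Java code must still see 'true'; the graph computes the equivalent of
//   normalized_result = (p != 0) ? 1 : p;
// which collapses every non-zero byte to 1.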
2457 }
2458 if (type == T_ADDRESS) {
2459 p = gvn().transform(new CastP2XNode(nullptr, p));
2460 p = ConvX2UL(p);
2461 }
2462 // The load node has the control of the preceding MemBarCPUOrder. All
2463 // following nodes will have the control of the MemBarCPUOrder inserted at
2464 // the end of this method. So, pushing the load onto the stack at a later
2465 // point is fine.
2466 set_result(p);
2467 } else {
2468 if (bt == T_ADDRESS) {
2469 // Repackage the long as a pointer.
2470 val = ConvL2X(val);
2471 val = gvn().transform(new CastX2PNode(val));
2472 }
2473 access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2474 }
2475
2476 return true;
2477 }
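// End-to-end example for the method above (assuming a base that is known to
// be non-null and on-heap, and no type mismatch): Unsafe.putReferenceVolatile
// accumulates
//   C2_UNSAFE_ACCESS | ON_UNKNOWN_OOP_REF | IN_HEAP | MO_SEQ_CST
// before reaching access_store_at, composing the decorator pieces added step
// by step in the body.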
2478
2479 //----------------------------inline_unsafe_load_store----------------------------
2480 // This method serves a couple of different customers (depending on LoadStoreKind):
2481 //
2482 // LS_cmp_swap:
2483 //
2484 // boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2485 // boolean compareAndSetInt( Object o, long offset, int expected, int x);
2486 // boolean compareAndSetLong( Object o, long offset, long expected, long x);
2487 //
2488 // LS_cmp_swap_weak:
2489 //
2490 // boolean weakCompareAndSetReference( Object o, long offset, Object expected, Object x);
2491 // boolean weakCompareAndSetReferencePlain( Object o, long offset, Object expected, Object x);
2492 // boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2493 // boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2494 //
2495 // boolean weakCompareAndSetInt( Object o, long offset, int expected, int x);
2496 // boolean weakCompareAndSetIntPlain( Object o, long offset, int expected, int x);
2497 // boolean weakCompareAndSetIntAcquire( Object o, long offset, int expected, int x);
2498 // boolean weakCompareAndSetIntRelease( Object o, long offset, int expected, int x);
2664 }
2665 case LS_cmp_swap:
2666 case LS_cmp_swap_weak:
2667 case LS_get_add:
2668 break;
2669 default:
2670 ShouldNotReachHere();
2671 }
2672
2673 // Null check receiver.
2674 receiver = null_check(receiver);
2675 if (stopped()) {
2676 return true;
2677 }
2678
2679 int alias_idx = C->get_alias_index(adr_type);
2680
2681 if (is_reference_type(type)) {
2682 decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2683
2684 // Transformation of a value which could be null pointer (CastPP #null)
2685 // could be delayed during Parse (for example, in adjust_map_after_if()).
2686 // Execute transformation here to avoid barrier generation in such case.
2687 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2688 newval = _gvn.makecon(TypePtr::NULL_PTR);
2689
2690 if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2691 // Refine the value to a null constant, when it is known to be null
2692 oldval = _gvn.makecon(TypePtr::NULL_PTR);
2693 }
2694 }
2695
2696 Node* result = nullptr;
2697 switch (kind) {
2698 case LS_cmp_exchange: {
2699 result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2700 oldval, newval, value_type, type, decorators);
2701 break;
2702 }
2703 case LS_cmp_swap_weak:
2850 Deoptimization::Action_make_not_entrant);
2851 }
2852 if (stopped()) {
2853 return true;
2854 }
2855 #endif //INCLUDE_JVMTI
2856
2857 Node* test = nullptr;
2858 if (LibraryCallKit::klass_needs_init_guard(kls)) {
2859 // Note: The argument might still be an illegal value like
2860 // Serializable.class or Object[].class. The runtime will handle it.
2861 // But we must make an explicit check for initialization.
2862 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2863 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2864 // can generate code to load it as an unsigned byte.
2865 Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
2866 Node* bits = intcon(InstanceKlass::fully_initialized);
2867 test = _gvn.transform(new SubINode(inst, bits));
2868 // The 'test' is non-zero if we need to take a slow path.
2869 }
2870
2871 Node* obj = new_instance(kls, test);
2872 set_result(obj);
2873 return true;
2874 }
2875
2876 //------------------------inline_native_time_funcs--------------
2877 // inline code for System.currentTimeMillis() and System.nanoTime()
2878 // these have the same type and signature
2879 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2880 const TypeFunc* tf = OptoRuntime::void_long_Type();
2881 const TypePtr* no_memory_effects = nullptr;
2882 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2883 Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2884 #ifdef ASSERT
2885 Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2886 assert(value_top == top(), "second value must be top");
2887 #endif
2888 set_result(value);
2889 return true;
2890 }
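// Example: the intrinsic table routes System.nanoTime() here with
// funcAddr == os::javaTimeNanos and funcName == "nanoTime". The call is a
// leaf call (RC_LEAF) with a null memory-effects type, i.e. it neither reads
// nor writes any Java-visible memory slice.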
2891
3627
3628 //------------------------inline_native_setCurrentThread------------------
3629 bool LibraryCallKit::inline_native_setCurrentThread() {
3630 assert(C->method()->changes_current_thread(),
3631 "method changes current Thread but is not annotated ChangesCurrentThread");
3632 Node* arr = argument(1);
3633 Node* thread = _gvn.transform(new ThreadLocalNode());
3634 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3635 Node* thread_obj_handle
3636 = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3637 thread_obj_handle = _gvn.transform(thread_obj_handle);
3638 const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3639 access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3640 JFR_ONLY(extend_setCurrentThread(thread, arr);)
3641 return true;
3642 }
3643
3644 const Type* LibraryCallKit::scopedValueCache_type() {
3645 ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3646 const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3647 const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3648
3649 // Because we create the scopedValue cache lazily we have to make the
3650 // type of the result BotPTR.
3651 bool xk = etype->klass_is_exact();
3652 const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3653 return objects_type;
3654 }
3655
3656 Node* LibraryCallKit::scopedValueCache_helper() {
3657 Node* thread = _gvn.transform(new ThreadLocalNode());
3658 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3659 // We cannot use immutable_memory() because we might flip onto a
3660 // different carrier thread, at which point we'll need to use that
3661 // carrier thread's cache.
3662 // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3663 // TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3664 return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3665 }
3666
3667 //------------------------inline_native_scopedValueCache------------------
3668 bool LibraryCallKit::inline_native_scopedValueCache() {
3669 Node* cache_obj_handle = scopedValueCache_helper();
3670 const Type* objects_type = scopedValueCache_type();
3671 set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3672
3673 return true;
3674 }
3675
3676 //------------------------inline_native_setScopedValueCache------------------
3677 bool LibraryCallKit::inline_native_setScopedValueCache() {
3678 Node* arr = argument(0);
3679 Node* cache_obj_handle = scopedValueCache_helper();
3680 const Type* objects_type = scopedValueCache_type();
3681
3682 const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3683 access_store_at(nullptr, cache_obj_handle, adr_type, arr, objects_type, T_OBJECT, IN_NATIVE | MO_UNORDERED);
3684
3685 return true;
3686 }
3687
3688 //---------------------------load_mirror_from_klass----------------------------
3689 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3690 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3691 Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3692 Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3693 // mirror = ((OopHandle)mirror)->resolve();
3694 return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3695 }
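// In effect this is the compiled form of klass->java_mirror(): load the
// OopHandle stored in the Klass, then resolve it with a decorated IN_NATIVE
// oop load.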
3696
3697 //-----------------------load_klass_from_mirror_common-------------------------
3698 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3699 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3700 // and branch to the given path on the region.
3701 // If never_see_null, take an uncommon trap on null, so we can optimistically
3702 // compile for the non-null case.
3703 // If the region is null, force never_see_null = true.
3704 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3705 bool never_see_null,
3706 RegionNode* region,
3707 int null_path,
3708 int offset) {
3709 if (region == nullptr) never_see_null = true;
3710 Node* p = basic_plus_adr(mirror, offset);
3711 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3712 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3713 Node* null_ctl = top();
3714 kls = null_check_oop(kls, &null_ctl, never_see_null);
3715 if (region != nullptr) {
3716 // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
3719 assert(null_ctl == top(), "no loose ends");
3720 }
3721 return kls;
3722 }
3723
3724 //--------------------(inline_native_Class_query helpers)---------------------
3725 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3726 // Fall through if (mods & mask) == bits, take the guard otherwise.
3727 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3728 // Branch around if the given klass has the given modifier bit set.
3729 // Like generate_guard, adds a new path onto the region.
3730 Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3731 Node* mods = make_load(nullptr, modp, TypeInt::INT, T_INT, MemNode::unordered);
3732 Node* mask = intcon(modifier_mask);
3733 Node* bits = intcon(modifier_bits);
3734 Node* mbit = _gvn.transform(new AndINode(mods, mask));
3735 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
3736 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3737 return generate_fair_guard(bol, region);
3738 }
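// Example readings of the wrappers below: generate_interface_guard passes
// mask == JVM_ACC_INTERFACE and bits == 0, so control falls through when the
// klass is NOT an interface and the returned guard is the path on which it
// IS one; generate_hidden_class_guard does the same for
// JVM_ACC_IS_HIDDEN_CLASS.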
3739 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3740 return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3741 }
3742 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3743 return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3744 }
3745
3746 //-------------------------inline_native_Class_query-------------------
3747 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3748 const Type* return_type = TypeInt::BOOL;
3749 Node* prim_return_value = top(); // what happens if it's a primitive class?
3750 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3751 bool expect_prim = false; // most of these guys expect to work on refs
3752
3753 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3754
3755 Node* mirror = argument(0);
3756 Node* obj = top();
3757
3758 switch (id) {
3912
3913 case vmIntrinsics::_getClassAccessFlags:
3914 p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3915 query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
3916 break;
3917
3918 default:
3919 fatal_unexpected_iid(id);
3920 break;
3921 }
3922
3923 // Fall-through is the normal case of a query to a real class.
3924 phi->init_req(1, query_value);
3925 region->init_req(1, control());
3926
3927 C->set_has_split_ifs(true); // Has chance for split-if optimization
3928 set_result(region, phi);
3929 return true;
3930 }
3931
3932 //-------------------------inline_Class_cast-------------------
3933 bool LibraryCallKit::inline_Class_cast() {
3934 Node* mirror = argument(0); // Class
3935 Node* obj = argument(1);
3936 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3937 if (mirror_con == nullptr) {
3938 return false; // dead path (mirror->is_top()).
3939 }
3940 if (obj == nullptr || obj->is_top()) {
3941 return false; // dead path
3942 }
3943 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
3944
3945 // First, see if Class.cast() can be folded statically.
3946 // java_mirror_type() returns non-null for compile-time Class constants.
3947 ciType* tm = mirror_con->java_mirror_type();
3948 if (tm != nullptr && tm->is_klass() &&
3949 tp != nullptr) {
3950 if (!tp->is_loaded()) {
3951 // Don't use intrinsic when class is not loaded.
3952 return false;
3953 } else {
3954 int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());
3955 if (static_res == Compile::SSC_always_true) {
3956 // isInstance() is true - fold the code.
3957 set_result(obj);
3958 return true;
3959 } else if (static_res == Compile::SSC_always_false) {
3960 // Don't use intrinsic, have to throw ClassCastException.
3961 // If the reference is null, the non-intrinsic bytecode will
3962 // be optimized appropriately.
3963 return false;
3964 }
3965 }
3966 }
3967
3968 // Bail out of the intrinsic and do normal inlining if the exception path is frequent.
3969 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3970 return false;
3971 }
3972
3973 // Generate dynamic checks.
3974 // Class.cast() is the Java implementation of the checkcast bytecode.
3975 // Do checkcast (Parse::do_checkcast()) optimizations here.
3976
3977 mirror = null_check(mirror);
3978 // If mirror is dead, only null-path is taken.
3979 if (stopped()) {
3980 return true;
3981 }
3982
3983 // Not-subtype or the mirror's klass ptr is null (in case it is a primitive).
3984 enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
3985 RegionNode* region = new RegionNode(PATH_LIMIT);
3986 record_for_igvn(region);
3987
3988 // Now load the mirror's klass metaobject, and null-check it.
3989 // If kls is null, we have a primitive mirror and
3990 // nothing is an instance of a primitive type.
3991 Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
3992
3993 Node* res = top();
3994 if (!stopped()) {
3995 Node* bad_type_ctrl = top();
3996 // Do checkcast optimizations.
3997 res = gen_checkcast(obj, kls, &bad_type_ctrl);
3998 region->init_req(_bad_type_path, bad_type_ctrl);
3999 }
4000 if (region->in(_prim_path) != top() ||
4001 region->in(_bad_type_path) != top()) {
4002 // Let Interpreter throw ClassCastException.
4003 PreserveJVMState pjvms(this);
4004 set_control(_gvn.transform(region));
4005 uncommon_trap(Deoptimization::Reason_intrinsic,
4006 Deoptimization::Action_maybe_recompile);
4007 }
4008 if (!stopped()) {
4009 set_result(res);
4010 }
4011 return true;
4012 }
4013
4014
4015 //--------------------------inline_native_subtype_check------------------------
4016 // This intrinsic takes the JNI calls out of the heart of
4017 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4018 bool LibraryCallKit::inline_native_subtype_check() {
4019 // Pull both arguments off the stack.
4020 Node* args[2]; // two java.lang.Class mirrors: superc, subc
4021 args[0] = argument(0);
4022 args[1] = argument(1);
4023 Node* klasses[2]; // corresponding Klasses: superk, subk
4024 klasses[0] = klasses[1] = top();
4025
4026 enum {
4027 // A full decision tree on {superc is prim, subc is prim}:
4028 _prim_0_path = 1, // {P,N} => false
4029 // {P,P} & superc!=subc => false
4030 _prim_same_path, // {P,P} & superc==subc => true
4031 _prim_1_path, // {N,P} => false
4032 _ref_subtype_path, // {N,N} & subtype check wins => true
4033 _both_ref_path, // {N,N} & subtype check loses => false
4034 PATH_LIMIT
4035 };
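// Worked example of the decision tree (the mirrors are illustrative):
// int.class.isAssignableFrom(int.class) loads two null klass pointers ({P,P})
// from identical mirrors and exits through _prim_same_path with 'true';
// String.class.isAssignableFrom(int.class) is {N,P} and exits through
// _prim_1_path with 'false'.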
4036
4037 RegionNode* region = new RegionNode(PATH_LIMIT);
4038 Node* phi = new PhiNode(region, TypeInt::BOOL);
4039 record_for_igvn(region);
4040
4041 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
4042 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4043 int class_klass_offset = java_lang_Class::klass_offset();
4044
4045 // First null-check both mirrors and load each mirror's klass metaobject.
4046 int which_arg;
4047 for (which_arg = 0; which_arg <= 1; which_arg++) {
4048 Node* arg = args[which_arg];
4049 arg = null_check(arg);
4050 if (stopped()) break;
4051 args[which_arg] = arg;
4052
4053 Node* p = basic_plus_adr(arg, class_klass_offset);
4054 Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
4055 klasses[which_arg] = _gvn.transform(kls);
4056 }
4057
4058 // Having loaded both klasses, test each for null.
4059 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4060 for (which_arg = 0; which_arg <= 1; which_arg++) {
4061 Node* kls = klasses[which_arg];
4062 Node* null_ctl = top();
4063 kls = null_check_oop(kls, &null_ctl, never_see_null);
4064 int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
4065 region->init_req(prim_path, null_ctl);
4066 if (stopped()) break;
4067 klasses[which_arg] = kls;
4068 }
4069
4070 if (!stopped()) {
4071 // now we have two reference types, in klasses[0..1]
4072 Node* subk = klasses[1]; // the argument to isAssignableFrom
4073 Node* superk = klasses[0]; // the receiver
4074 region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4075 // now we have a successful reference subtype check
4076 region->set_req(_ref_subtype_path, control());
4077 }
4078
4079 // If both operands are primitive (both klasses null), then
4080 // we must return true when they are identical primitives.
4081 // It is convenient to test this after the first null klass check.
4082 set_control(region->in(_prim_0_path)); // go back to first null check
4083 if (!stopped()) {
4084 // Since superc is primitive, make a guard for the superc==subc case.
4085 Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4086 Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4087 generate_guard(bol_eq, region, PROB_FAIR);
4088 if (region->req() == PATH_LIMIT+1) {
4089 // A guard was added. If the added guard is taken, superc==subc.
4090 region->swap_edges(PATH_LIMIT, _prim_same_path);
4091 region->del_req(PATH_LIMIT);
4092 }
4093 region->set_req(_prim_0_path, control()); // Not equal after all.
4094 }
4095
4096 // these are the only paths that produce 'true':
4097 phi->set_req(_prim_same_path, intcon(1));
4098 phi->set_req(_ref_subtype_path, intcon(1));
4099
4100 // pull together the cases:
4101 assert(region->req() == PATH_LIMIT, "sane region");
4102 for (uint i = 1; i < region->req(); i++) {
4103 Node* ctl = region->in(i);
4104 if (ctl == nullptr || ctl == top()) {
4105 region->set_req(i, top());
4106 phi ->set_req(i, top());
4107 } else if (phi->in(i) == nullptr) {
4108 phi->set_req(i, intcon(0)); // all other paths produce 'false'
4109 }
4110 }
4111
4112 set_control(_gvn.transform(region));
4113 set_result(_gvn.transform(phi));
4114 return true;
4115 }
4116
4117 //---------------------generate_array_guard_common------------------------
4118 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
4119 bool obj_array, bool not_array) {
4120
4121 if (stopped()) {
4122 return nullptr;
4123 }
4124
4125 // If obj_array/not_array==false/false:
4126 // Branch around if the given klass is in fact an array (either obj or prim).
4127 // If obj_array/not_array==false/true:
4128 // Branch around if the given klass is not an array klass of any kind.
4129 // If obj_array/not_array==true/true:
4130 // Branch around if the kls is not an oop array (kls is int[], String, etc.)
4131 // If obj_array/not_array==true/false:
4132 // Branch around if the kls is an oop array (Object[] or subtype)
4133 //
4134 // Like generate_guard, adds a new path onto the region.
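// Example grounded in the callers later in this file: inline_native_clone
// uses the (obj_array/not_array == false/false) form, so the returned control
// path is the one on which the klass IS some array, and the (true/false) form
// behind generate_objArray_guard returns the path on which it IS an oop
// array.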
4135 jint layout_con = 0;
4136 Node* layout_val = get_layout_helper(kls, layout_con);
4137 if (layout_val == nullptr) {
4138 bool query = (obj_array
4139 ? Klass::layout_helper_is_objArray(layout_con)
4140 : Klass::layout_helper_is_array(layout_con));
4141 if (query == not_array) {
4142 return nullptr; // never a branch
4143 } else { // always a branch
4144 Node* always_branch = control();
4145 if (region != nullptr)
4146 region->add_req(always_branch);
4147 set_control(top());
4148 return always_branch;
4149 }
4150 }
4151 // Now test the correct condition.
4152 jint nval = (obj_array
4153 ? (jint)(Klass::_lh_array_tag_type_value
4154 << Klass::_lh_array_tag_shift)
4155 : Klass::_lh_neutral_value);
4156 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4157 BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array
4158 // invert the test if we are looking for a non-array
4159 if (not_array) btest = BoolTest(btest).negate();
4160 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4161 return generate_fair_guard(bol, region);
4162 }
4163
4164
4165 //-----------------------inline_native_newArray--------------------------
4166 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4167 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4168 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4169 Node* mirror;
4170 Node* count_val;
4171 if (uninitialized) {
4172 null_check_receiver();
4173 mirror = argument(1);
4174 count_val = argument(2);
4175 } else {
4176 mirror = argument(0);
4177 count_val = argument(1);
4178 }
4179
4180 mirror = null_check(mirror);
4181 // If mirror or obj is dead, only null-path is taken.
4182 if (stopped()) return true;
4183
4184 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4185 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4186 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4292 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4293 { PreserveReexecuteState preexecs(this);
4294 jvms()->set_should_reexecute(true);
4295
4296 array_type_mirror = null_check(array_type_mirror);
4297 original = null_check(original);
4298
4299 // Check if a null path was taken unconditionally.
4300 if (stopped()) return true;
4301
4302 Node* orig_length = load_array_length(original);
4303
4304 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4305 klass_node = null_check(klass_node);
4306
4307 RegionNode* bailout = new RegionNode(1);
4308 record_for_igvn(bailout);
4309
4310 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4311 // Bail out if that is so.
4312 Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
4313 if (not_objArray != nullptr) {
4314 // Improve the klass node's type from the new optimistic assumption:
4315 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4316 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
4317 Node* cast = new CastPPNode(control(), klass_node, akls);
4318 klass_node = _gvn.transform(cast);
4319 }
4320
4321 // Bail out if either start or end is negative.
4322 generate_negative_guard(start, bailout, &start);
4323 generate_negative_guard(end, bailout, &end);
4324
4325 Node* length = end;
4326 if (_gvn.type(start) != TypeInt::ZERO) {
4327 length = _gvn.transform(new SubINode(end, start));
4328 }
4329
4330 // Bail out if length is negative.
4331 // Without this, new_array would throw
4332 // NegativeArraySizeException, but IllegalArgumentException is what
4333 // should be thrown.
4334 generate_negative_guard(length, bailout, &length);
4335
4336 if (bailout->req() > 1) {
4337 PreserveJVMState pjvms(this);
4338 set_control(_gvn.transform(bailout));
4339 uncommon_trap(Deoptimization::Reason_intrinsic,
4340 Deoptimization::Action_maybe_recompile);
4341 }
4342
4343 if (!stopped()) {
4344 // How many elements will we copy from the original?
4345 // The answer is MinI(orig_length - start, length).
4346 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4347 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4348
4349 // Generate a direct call to the right arraycopy function(s).
4350 // We know the copy is disjoint but we might not know if the
4351 // oop stores need checking.
4352 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
4353 // This will fail a store-check if x contains any non-nulls.
4354
4355 // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
4358 // to the copyOf to be validated, including that the copy to the
4359 // new array won't trigger an ArrayStoreException. That subtype
4360 // check can be optimized if we know something on the type of
4361 // the input array from type speculation.
4362 if (_gvn.type(klass_node)->singleton()) {
4363 const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4364 const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4365
4366 int test = C->static_subtype_check(superk, subk);
4367 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4368 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4369 if (t_original->speculative_type() != nullptr) {
4370 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4371 }
4372 }
4373 }
4374
4375 bool validated = false;
4376 // Reason_class_check rather than Reason_intrinsic because we
4377 // want to intrinsify even if this traps.
4378 if (!too_many_traps(Deoptimization::Reason_class_check)) {
4379 Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4380
4381 if (not_subtype_ctrl != top()) {
4382 PreserveJVMState pjvms(this);
4383 set_control(not_subtype_ctrl);
4384 uncommon_trap(Deoptimization::Reason_class_check,
4385 Deoptimization::Action_make_not_entrant);
4386 assert(stopped(), "Should be stopped");
4387 }
4388 validated = true;
4389 }
4390
4391 if (!stopped()) {
4392 newcopy = new_array(klass_node, length, 0); // no arguments to push
4393
4394 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
4395 load_object_klass(original), klass_node);
4396 if (!is_copyOfRange) {
4397 ac->set_copyof(validated);
4398 } else {
4444
4445 //-----------------------generate_method_call----------------------------
4446 // Use generate_method_call to make a slow-call to the real
4447 // method if the fast path fails. An alternative would be to
4448 // use a stub like OptoRuntime::slow_arraycopy_Java.
4449 // This only works for expanding the current library call,
4450 // not another intrinsic. (E.g., don't use this for making an
4451 // arraycopy call inside of the copyOf intrinsic.)
4452 CallJavaNode*
4453 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4454 // When compiling the intrinsic method itself, do not use this technique.
4455 guarantee(callee() != C->method(), "cannot make slow-call to self");
4456
4457 ciMethod* method = callee();
4458 // ensure the JVMS we have will be correct for this call
4459 guarantee(method_id == method->intrinsic_id(), "must match");
4460
4461 const TypeFunc* tf = TypeFunc::make(method);
4462 if (res_not_null) {
4463 assert(tf->return_type() == T_OBJECT, "");
4464 const TypeTuple* range = tf->range();
4465 const Type** fields = TypeTuple::fields(range->cnt());
4466 fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4467 const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4468 tf = TypeFunc::make(tf->domain(), new_range);
4469 }
4470 CallJavaNode* slow_call;
4471 if (is_static) {
4472 assert(!is_virtual, "");
4473 slow_call = new CallStaticJavaNode(C, tf,
4474 SharedRuntime::get_resolve_static_call_stub(), method);
4475 } else if (is_virtual) {
4476 assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4477 int vtable_index = Method::invalid_vtable_index;
4478 if (UseInlineCaches) {
4479 // Suppress the vtable call
4480 } else {
4481 // hashCode and clone are not miranda methods,
4482 // so the vtable index is fixed.
4483 // No need to use the linkResolver to get it.
4484 vtable_index = method->vtable_index();
4485 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4486 "bad index %d", vtable_index);
4487 }
4488 slow_call = new CallDynamicJavaNode(tf,
4505 set_edges_for_java_call(slow_call);
4506 return slow_call;
4507 }
4508
4509
4510 /**
4511 * Build special case code for calls to hashCode on an object. This call may
4512 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4513 * slightly different code.
4514 */
4515 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4516 assert(is_static == callee()->is_static(), "correct intrinsic selection");
4517 assert(!(is_virtual && is_static), "either virtual, special, or static");
4518
4519 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4520
4521 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4522 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
4523 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4524 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4525 Node* obj = nullptr;
4526 if (!is_static) {
4527 // Check for hashing null object
4528 obj = null_check_receiver();
4529 if (stopped()) return true; // unconditionally null
4530 result_reg->init_req(_null_path, top());
4531 result_val->init_req(_null_path, top());
4532 } else {
4533 // Do a null check, and return zero if null.
4534 // System.identityHashCode(null) == 0
4535 obj = argument(0);
4536 Node* null_ctl = top();
4537 obj = null_check_oop(obj, &null_ctl);
4538 result_reg->init_req(_null_path, null_ctl);
4539 result_val->init_req(_null_path, _gvn.intcon(0));
4540 }
4541
4542 // Unconditionally null? Then return right away.
4543 if (stopped()) {
4544 set_control( result_reg->in(_null_path));
4545 if (!stopped())
4546 set_result(result_val->in(_null_path));
4547 return true;
4548 }
4549
4550 // We only go to the fast case code if we pass a number of guards. The
4551 // paths which do not pass are accumulated in the slow_region.
4552 RegionNode* slow_region = new RegionNode(1);
4553 record_for_igvn(slow_region);
4554
4555 // If this is a virtual call, we generate a funny guard. We pull out
4556 // the vtable entry corresponding to hashCode() from the target object.
4557 // If the target method which we are calling happens to be the native
4558 // Object hashCode() method, we pass the guard. We do not need this
4559 // guard for non-virtual calls -- the caller is known to be the native
4560 // Object hashCode().
4561 if (is_virtual) {
4562 // After null check, get the object's klass.
4563 Node* obj_klass = load_object_klass(obj);
4564 generate_virtual_guard(obj_klass, slow_region);
4565 }
4566
4567 // Get the header out of the object, use LoadMarkNode when available
4568 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4569 // The control of the load must be null. Otherwise, the load can move before
4570 // the null check after castPP removal.
4571 Node* no_ctrl = nullptr;
4572 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4573
4574 // Test the header to see if it is safe to read w.r.t. locking.
4575 Node *lock_mask = _gvn.MakeConX(markWord::lock_mask_in_place);
4576 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4577 if (LockingMode == LM_LIGHTWEIGHT) {
4578 Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
4579 Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4580 Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4581
4582 generate_slow_guard(test_monitor, slow_region);
4583 } else {
4584 Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
4585 Node *chk_unlocked = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
4586 Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
4587
4588 generate_slow_guard(test_not_unlocked, slow_region);
4589 }
4590
4591 // Get the hash value and check to see that it has been properly assigned.
4592 // We depend on hash_mask being at most 32 bits and avoid the use of
4593 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4594 // vm: see markWord.hpp.
4595 Node *hash_mask = _gvn.intcon(markWord::hash_mask);
4629 // this->control() comes from set_results_for_java_call
4630 result_reg->init_req(_slow_path, control());
4631 result_val->init_req(_slow_path, slow_result);
4632 result_io ->set_req(_slow_path, i_o());
4633 result_mem ->set_req(_slow_path, reset_memory());
4634 }
4635
4636 // Return the combined state.
4637 set_i_o( _gvn.transform(result_io) );
4638 set_all_memory( _gvn.transform(result_mem));
4639
4640 set_result(result_reg, result_val);
4641 return true;
4642 }
4643
4644 //---------------------------inline_native_getClass----------------------------
4645 // public final native Class<?> java.lang.Object.getClass();
4646 //
4647 // Build special case code for calls to getClass on an object.
4648 bool LibraryCallKit::inline_native_getClass() {
4649 Node* obj = null_check_receiver();
4650 if (stopped()) return true;
4651 set_result(load_mirror_from_klass(load_object_klass(obj)));
4652 return true;
4653 }
4654
4655 //-----------------inline_native_Reflection_getCallerClass---------------------
4656 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4657 //
4658 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4659 //
4660 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4661 // in that it must skip particular security frames and checks for
4662 // caller sensitive methods.
4663 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4664 #ifndef PRODUCT
4665 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4666 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4667 }
4668 #endif
4669
4930 if (C->get_alias_index(src_type) == C->get_alias_index(dst_type)) {
4931 flags |= RC_NARROW_MEM; // narrow in memory
4932 }
4933 }
4934
4935 // Call it. Note that the length argument is not scaled.
4936 make_runtime_call(flags,
4937 OptoRuntime::fast_arraycopy_Type(),
4938 StubRoutines::unsafe_arraycopy(),
4939 "unsafe_arraycopy",
4940 dst_type,
4941 src_addr, dst_addr, size XTOP);
4942
4943 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
4944
4945 return true;
4946 }
4947
4948 #undef XTOP
4949
4950 //------------------------copy_to_clone-----------------------------------
4951 // Helper function for inline_native_clone.
4952 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
4953 assert(obj_size != nullptr, "");
4954 Node* raw_obj = alloc_obj->in(1);
4955 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4956
4957 AllocateNode* alloc = nullptr;
4958 if (ReduceBulkZeroing) {
4959 // We will be completely responsible for initializing this object -
4960 // mark Initialize node as complete.
4961 alloc = AllocateNode::Ideal_allocation(alloc_obj);
4962 // The object was just allocated - there should not be any stores!
4963 guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
4964 // Mark as complete_with_arraycopy so that on AllocateNode
4965 // expansion, we know this AllocateNode is initialized by an array
4966 // copy and a StoreStore barrier exists after the array copy.
4967 alloc->initialization()->set_complete_with_arraycopy();
4968 }
4969
4994 // not cloneable or finalizer => slow path to out-of-line Object.clone
4995 //
4996 // The general case has two steps, allocation and copying.
4997 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4998 //
4999 // Copying also has two cases, oop arrays and everything else.
5000 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5001 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5002 //
5003 // These steps fold up nicely if and when the cloned object's klass
5004 // can be sharply typed as an object array, a type array, or an instance.
5005 //
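// Concrete example of the fold-up (element types are illustrative): cloning
// an int[] takes _array_path (new_array plus arrayof_long_arraycopy); cloning
// an Object[] takes _objArray_path when the GC demands barriers; a plain
// instance takes _instance_path; a non-cloneable object falls back to
// _slow_path, the out-of-line Object.clone call.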
5006 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5007 PhiNode* result_val;
5008
5009 // Set the reexecute bit for the interpreter to reexecute
5010 // the bytecode that invokes Object.clone if deoptimization happens.
5011 { PreserveReexecuteState preexecs(this);
5012 jvms()->set_should_reexecute(true);
5013
5014 Node* obj = null_check_receiver();
5015 if (stopped()) return true;
5016
5017 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5018
5019 // If we are going to clone an instance, we need its exact type to
5020 // know the number and types of fields to convert the clone to
5021 // loads/stores. Maybe a speculative type can help us.
5022 if (!obj_type->klass_is_exact() &&
5023 obj_type->speculative_type() != nullptr &&
5024 obj_type->speculative_type()->is_instance_klass()) {
5025 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5026 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5027 !spec_ik->has_injected_fields()) {
5028 if (!obj_type->isa_instptr() ||
5029 obj_type->is_instptr()->instance_klass()->has_subklass()) {
5030 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5031 }
5032 }
5033 }
5034
5035 // Conservatively insert a memory barrier on all memory slices.
5036 // Do not let writes into the original float below the clone.
5037 insert_mem_bar(Op_MemBarCPUOrder);
5038
5039 // paths into result_reg:
5040 enum {
5041 _slow_path = 1, // out-of-line call to clone method (virtual or not)
5042 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
5043 _array_path, // plain array allocation, plus arrayof_long_arraycopy
5044 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
5045 PATH_LIMIT
5046 };
5047 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5048 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5049 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
5050 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5051 record_for_igvn(result_reg);
5052
5053 Node* obj_klass = load_object_klass(obj);
5054 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
5055 if (array_ctl != nullptr) {
5056 // It's an array.
5057 PreserveJVMState pjvms(this);
5058 set_control(array_ctl);
5059 Node* obj_length = load_array_length(obj);
5060 Node* array_size = nullptr; // Size of the array without object alignment padding.
5061 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5062
5063 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5064 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5065 // If it is an oop array, it requires very special treatment,
5066 // because gc barriers are required when accessing the array.
5067 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5068 if (is_obja != nullptr) {
5069 PreserveJVMState pjvms2(this);
5070 set_control(is_obja);
5071 // Generate a direct call to the right arraycopy function(s).
5072 // Clones are always tightly coupled.
5073 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5074 ac->set_clone_oop_array();
5075 Node* n = _gvn.transform(ac);
5076 assert(n == ac, "cannot disappear");
5077 ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5078
5079 result_reg->init_req(_objArray_path, control());
5080 result_val->init_req(_objArray_path, alloc_obj);
5081 result_i_o ->set_req(_objArray_path, i_o());
5082 result_mem ->set_req(_objArray_path, reset_memory());
5083 }
5084 }
5085 // Otherwise, there are no barriers to worry about.
5086 // (We can dispense with card marks if we know the allocation
5087 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
5088 // causes the non-eden paths to take compensating steps to
5089 // simulate a fresh allocation, so that no further
5090 // card marks are required in compiled code to initialize
5091 // the object.)
5092
5093 if (!stopped()) {
5094 copy_to_clone(obj, alloc_obj, array_size, true);
5095
5096 // Present the results of the copy.
5097 result_reg->init_req(_array_path, control());
5098 result_val->init_req(_array_path, alloc_obj);
5099 result_i_o ->set_req(_array_path, i_o());
5100 result_mem ->set_req(_array_path, reset_memory());
5101 }
5102 }
5103
5104 // We only go to the instance fast case code if we pass a number of guards.
5105 // The paths which do not pass are accumulated in the slow_region.
5106 RegionNode* slow_region = new RegionNode(1);
5107 record_for_igvn(slow_region);
5108 if (!stopped()) {
5109 // It's an instance (we did array above). Make the slow-path tests.
5110 // If this is a virtual call, we generate a funny guard. We grab
5111 // the vtable entry corresponding to clone() from the target object.
5112 // If the target method which we are calling happens to be the
5113 // Object clone() method, we pass the guard. We do not need this
5114 // guard for non-virtual calls; the caller is known to be the native
5115 // Object clone().
5116 if (is_virtual) {
5117 generate_virtual_guard(obj_klass, slow_region);
5118 }
5119
5120 // The object must be easily cloneable and must not have a finalizer.
5121 // Both of these conditions may be checked in a single test.
5122 // We could optimize the test further, but we don't care.
5123 generate_access_flags_guard(obj_klass,
5124 // Test both conditions:
5125 JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
5126 // Must be cloneable but not finalizer:
5127 JVM_ACC_IS_CLONEABLE_FAST,
5219 set_jvms(sfpt->jvms());
5220 _reexecute_sp = jvms()->sp();
5221
5222 return saved_jvms;
5223 }
5224 }
5225 }
5226 return nullptr;
5227 }
5228
5229 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5230 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5231 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5232 JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5233 uint size = alloc->req();
5234 SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5235 old_jvms->set_map(sfpt);
5236 for (uint i = 0; i < size; i++) {
5237 sfpt->init_req(i, alloc->in(i));
5238 }
5239 // re-push array length for deoptimization
5240 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
5241 old_jvms->set_sp(old_jvms->sp()+1);
5242 old_jvms->set_monoff(old_jvms->monoff()+1);
5243 old_jvms->set_scloff(old_jvms->scloff()+1);
5244 old_jvms->set_endoff(old_jvms->endoff()+1);
5245 old_jvms->set_should_reexecute(true);
5246
5247 sfpt->set_i_o(map()->i_o());
5248 sfpt->set_memory(map()->memory());
5249 sfpt->set_control(map()->control());
5250 return sfpt;
5251 }
5252
5253 // In case of a deoptimization, we restart execution at the
5254 // allocation, allocating a new array. We would leave an uninitialized
5255 // array in the heap that GCs wouldn't expect. Move the allocation
5256 // after the traps so we don't allocate the array if we
5257 // deoptimize. This is possible because tightly_coupled_allocation()
5258 // guarantees there's no observer of the allocated array at this point
5259 // and the control flow is simple enough.
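// Schematically (an illustrative sketch of the transformation below):
//
//   before:  alloc(dest) -> guards (may deopt) -> ArrayCopy
//   after:   guards (may deopt) -> alloc(dest) -> ArrayCopy
//
// A deoptimization taken at the guards then re-executes the allocation in
// the interpreter instead of leaving an uninitialized array in the heap.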
5260 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5261 int saved_reexecute_sp, uint new_idx) {
5262 if (saved_jvms_before_guards != nullptr && !stopped()) {
5263 replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5264
5265 assert(alloc != nullptr, "only with a tightly coupled allocation");
5266 // restore JVM state to the state at the arraycopy
5267 saved_jvms_before_guards->map()->set_control(map()->control());
5268 assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5269 assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5270 // If we've improved the types of some nodes (null check) while
5271 // emitting the guards, propagate them to the current state
5272 map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5273 set_jvms(saved_jvms_before_guards);
5274 _reexecute_sp = saved_reexecute_sp;
5275
5276 // Remove the allocation from above the guards
5277 CallProjections callprojs;
5278 alloc->extract_projections(&callprojs, true);
5279 InitializeNode* init = alloc->initialization();
5280 Node* alloc_mem = alloc->in(TypeFunc::Memory);
5281 C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5282 C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5283
5284 // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5285 // the allocation (i.e. is only valid if the allocation succeeds):
5286 // 1) replace CastIINode with AllocateArrayNode's length here
5287 // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5288 //
5289 // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate a
5290 // new separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy)
5291 Node* init_control = init->proj_out(TypeFunc::Control);
5292 Node* alloc_length = alloc->Ideal_length();
5293 #ifdef ASSERT
5294 Node* prev_cast = nullptr;
5295 #endif
5296 for (uint i = 0; i < init_control->outcnt(); i++) {
5297 Node* init_out = init_control->raw_out(i);
5298 if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5299 #ifdef ASSERT
5300 if (prev_cast == nullptr) {
5301 prev_cast = init_out;
5302 } else {
5303 if (prev_cast->cmp(*init_out) == false) {
5304 prev_cast->dump();
5305 init_out->dump();
5306 assert(false, "not equal CastIINode");
5307 }
5308 }
5309 #endif
5310 C->gvn_replace_by(init_out, alloc_length);
5311 }
5312 }
5313 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5314
5315 // move the allocation here (after the guards)
5316 _gvn.hash_delete(alloc);
5317 alloc->set_req(TypeFunc::Control, control());
5318 alloc->set_req(TypeFunc::I_O, i_o());
5319 Node *mem = reset_memory();
5320 set_all_memory(mem);
5321 alloc->set_req(TypeFunc::Memory, mem);
5322 set_control(init->proj_out_or_null(TypeFunc::Control));
5323 set_i_o(callprojs.fallthrough_ioproj);
5324
5325 // Update memory as done in GraphKit::set_output_for_allocation()
5326 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5327 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5328 if (ary_type->isa_aryptr() && length_type != nullptr) {
5329 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5330 }
5331 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5332 int elemidx = C->get_alias_index(telemref);
5333 set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5334 set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5335
5336 Node* allocx = _gvn.transform(alloc);
5337 assert(allocx == alloc, "where has the allocation gone?");
5338 assert(dest->is_CheckCastPP(), "not an allocation result?");
5339
5340 _gvn.hash_delete(dest);
5341 dest->set_req(0, control());
5342 Node* destx = _gvn.transform(dest);
5343 assert(destx == dest, "where has the allocation result gone?");
5613 top_src = src_type->isa_aryptr();
5614 has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5615 src_spec = true;
5616 }
5617 if (!has_dest) {
5618 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5619 dest_type = _gvn.type(dest);
5620 top_dest = dest_type->isa_aryptr();
5621 has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5622 dest_spec = true;
5623 }
5624 }
5625 }
5626
5627 if (has_src && has_dest && can_emit_guards) {
5628 BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5629 BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5630 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5631 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5632
5633 if (src_elem == dest_elem && src_elem == T_OBJECT) {
5634 // If both arrays are object arrays then having the exact types
5635 // for both will remove the need for a subtype check at runtime
5636 // before the call and may make it possible to pick a faster copy
5637 // routine (without a subtype check on every element)
5638 // Do we have the exact type of src?
5639 bool could_have_src = src_spec;
5640 // Do we have the exact type of dest?
5641 bool could_have_dest = dest_spec;
5642 ciKlass* src_k = nullptr;
5643 ciKlass* dest_k = nullptr;
5644 if (!src_spec) {
5645 src_k = src_type->speculative_type_not_null();
5646 if (src_k != nullptr && src_k->is_array_klass()) {
5647 could_have_src = true;
5648 }
5649 }
5650 if (!dest_spec) {
5651 dest_k = dest_type->speculative_type_not_null();
5652 if (dest_k != nullptr && dest_k->is_array_klass()) {
5653 could_have_dest = true;
5654 }
5655 }
5656 if (could_have_src && could_have_dest) {
5657 // If we can have both exact types, emit the missing guards
5658 if (could_have_src && !src_spec) {
5659 src = maybe_cast_profiled_obj(src, src_k, true);
5660 }
5661 if (could_have_dest && !dest_spec) {
5662 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5663 }
5664 }
5665 }
5666 }
5667
5668 ciMethod* trap_method = method();
5669 int trap_bci = bci();
5670 if (saved_jvms_before_guards != nullptr) {
5671 trap_method = alloc->jvms()->method();
5672 trap_bci = alloc->jvms()->bci();
5673 }
5674
5675 bool negative_length_guard_generated = false;
5676
5677 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5678 can_emit_guards &&
5679 !src->is_top() && !dest->is_top()) {
5680 // validate arguments: enables transformation of the ArrayCopyNode
5681 validated = true;
5682
5683 RegionNode* slow_region = new RegionNode(1);
5684 record_for_igvn(slow_region);
5685
5686 // (1) src and dest are arrays.
5687 generate_non_array_guard(load_object_klass(src), slow_region);
5688 generate_non_array_guard(load_object_klass(dest), slow_region);
5689
5690 // (2) src and dest arrays must have elements of the same BasicType
5691 // done at macro expansion or at Ideal transformation time
5692
5693 // (4) src_offset must not be negative.
5694 generate_negative_guard(src_offset, slow_region);
5695
5696 // (5) dest_offset must not be negative.
5697 generate_negative_guard(dest_offset, slow_region);
5698
5699 // (7) src_offset + length must not exceed length of src.
5702 slow_region);
5703
5704 // (8) dest_offset + length must not exceed length of dest.
5705 generate_limit_guard(dest_offset, length,
5706 load_array_length(dest),
5707 slow_region);
5708
5709 // (6) length must not be negative.
5710 // This is also checked in generate_arraycopy() during macro expansion, but
5711 // we also have to check it here for the case where the ArrayCopyNode will
5712 // be eliminated by Escape Analysis.
5713 if (EliminateAllocations) {
5714 generate_negative_guard(length, slow_region);
5715 negative_length_guard_generated = true;
5716 }
5717
5718 // (9) each element of an oop array must be assignable
5719 Node* dest_klass = load_object_klass(dest);
5720 if (src != dest) {
5721 Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
5722
5723 if (not_subtype_ctrl != top()) {
5724 PreserveJVMState pjvms(this);
5725 set_control(not_subtype_ctrl);
5726 uncommon_trap(Deoptimization::Reason_intrinsic,
5727 Deoptimization::Action_make_not_entrant);
5728 assert(stopped(), "Should be stopped");
5729 }
5730 }
5731 {
5732 PreserveJVMState pjvms(this);
5733 set_control(_gvn.transform(slow_region));
5734 uncommon_trap(Deoptimization::Reason_intrinsic,
5735 Deoptimization::Action_make_not_entrant);
5736 assert(stopped(), "Should be stopped");
5737 }
5738
5739 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5740 const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
5741 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5742 arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
5743 }
5744
5745 if (stopped()) {
5746 return true;
5747 }
5748
5749 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
5750 // Create LoadRange and LoadKlass nodes for use during macro expansion here
5751 // so the compiler has a chance to eliminate them: during macro expansion,
5752 // we have to set their control (CastPP nodes are eliminated).
5753 load_object_klass(src), load_object_klass(dest),
5754 load_array_length(src), load_array_length(dest));
5755
5756 ac->set_arraycopy(validated);
5757
5758 Node* n = _gvn.transform(ac);
5759 if (n == ac) {
5760 ac->connect_outputs(this);
5761 } else {
328 case vmIntrinsics::_makePrivateBuffer: return inline_unsafe_make_private_buffer();
329 case vmIntrinsics::_finishPrivateBuffer: return inline_unsafe_finish_private_buffer();
338 case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);
339 case vmIntrinsics::_getValue: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false, true);
340
341 case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false);
342 case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
343 case vmIntrinsics::_putByte: return inline_unsafe_access( is_store, T_BYTE, Relaxed, false);
344 case vmIntrinsics::_putShort: return inline_unsafe_access( is_store, T_SHORT, Relaxed, false);
345 case vmIntrinsics::_putChar: return inline_unsafe_access( is_store, T_CHAR, Relaxed, false);
346 case vmIntrinsics::_putInt: return inline_unsafe_access( is_store, T_INT, Relaxed, false);
347 case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false);
348 case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false);
349 case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false);
350 case vmIntrinsics::_putValue: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false, true);
351
352 case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
353 case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
354 case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
355 case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
356 case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_store, T_CHAR, Volatile, false);
357 case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_store, T_INT, Volatile, false);
358 case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_store, T_LONG, Volatile, false);
359 case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_store, T_FLOAT, Volatile, false);
360 case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_store, T_DOUBLE, Volatile, false);
361
362 case vmIntrinsics::_putReferenceVolatile: return inline_unsafe_access( is_store, T_OBJECT, Volatile, false);
363 case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
364 case vmIntrinsics::_putByteVolatile: return inline_unsafe_access( is_store, T_BYTE, Volatile, false);
365 case vmIntrinsics::_putShortVolatile: return inline_unsafe_access( is_store, T_SHORT, Volatile, false);
366 case vmIntrinsics::_putCharVolatile: return inline_unsafe_access( is_store, T_CHAR, Volatile, false);
367 case vmIntrinsics::_putIntVolatile: return inline_unsafe_access( is_store, T_INT, Volatile, false);
368 case vmIntrinsics::_putLongVolatile: return inline_unsafe_access( is_store, T_LONG, Volatile, false);
369 case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access( is_store, T_FLOAT, Volatile, false);
370 case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access( is_store, T_DOUBLE, Volatile, false);
495 case vmIntrinsics::_notifyJvmtiVThreadMount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
496 "notifyJvmtiMount", false, false);
497 case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
498 "notifyJvmtiUnmount", false, false);
499 case vmIntrinsics::_notifyJvmtiVThreadHideFrames: return inline_native_notify_jvmti_hide();
500 case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
501 #endif
502
503 #ifdef JFR_HAVE_INTRINSICS
504 case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
505 case vmIntrinsics::_getEventWriter: return inline_native_getEventWriter();
506 case vmIntrinsics::_jvm_commit: return inline_native_jvm_commit();
507 #endif
508 case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
509 case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
510 case vmIntrinsics::_writeback0: return inline_unsafe_writeback0();
511 case vmIntrinsics::_writebackPreSync0: return inline_unsafe_writebackSync0(true);
512 case vmIntrinsics::_writebackPostSync0: return inline_unsafe_writebackSync0(false);
513 case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
514 case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
515 case vmIntrinsics::_isFlatArray: return inline_unsafe_isFlatArray();
516 case vmIntrinsics::_getLength: return inline_native_getLength();
517 case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
518 case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
519 case vmIntrinsics::_equalsB: return inline_array_equals(StrIntrinsicNode::LL);
520 case vmIntrinsics::_equalsC: return inline_array_equals(StrIntrinsicNode::UU);
521 case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
522 case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
523 case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());
524
525 case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
526 case vmIntrinsics::_newArray: return inline_unsafe_newArray(false);
527 case vmIntrinsics::_newNullRestrictedArray: return inline_newNullRestrictedArray();
528
529 case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();
530
531 case vmIntrinsics::_isInstance:
532 case vmIntrinsics::_getModifiers:
533 case vmIntrinsics::_isInterface:
534 case vmIntrinsics::_isArray:
535 case vmIntrinsics::_isPrimitive:
536 case vmIntrinsics::_isHidden:
537 case vmIntrinsics::_getSuperclass:
538 case vmIntrinsics::_getClassAccessFlags: return inline_native_Class_query(intrinsic_id());
539
540 case vmIntrinsics::_floatToRawIntBits:
541 case vmIntrinsics::_floatToIntBits:
542 case vmIntrinsics::_intBitsToFloat:
543 case vmIntrinsics::_doubleToRawLongBits:
544 case vmIntrinsics::_doubleToLongBits:
545 case vmIntrinsics::_longBitsToDouble:
546 case vmIntrinsics::_floatToFloat16:
547 case vmIntrinsics::_float16ToFloat: return inline_fp_conversions(intrinsic_id());
2199 case vmIntrinsics::_remainderUnsigned_l: {
2200 zero_check_long(argument(2));
2201 // Compile-time detection of a zero divisor (ArithmeticException path)
2202 if (stopped()) {
2203 return true; // keep the graph constructed so far
2204 }
2205 n = new UModLNode(control(), argument(0), argument(2));
2206 break;
2207 }
2208 default: fatal_unexpected_iid(id); break;
2209 }
2210 set_result(_gvn.transform(n));
2211 return true;
2212 }
2213
2214 //----------------------------inline_unsafe_access----------------------------
2215
2216 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2217 // Attempt to infer a sharper value type from the offset and base type.
2218 ciKlass* sharpened_klass = nullptr;
2219 bool null_free = false;
2220
2221 // See if it is an instance field, with an object type.
2222 if (alias_type->field() != nullptr) {
2223 if (alias_type->field()->type()->is_klass()) {
2224 sharpened_klass = alias_type->field()->type()->as_klass();
2225 null_free = alias_type->field()->is_null_free();
2226 }
2227 }
2228
2229 const TypeOopPtr* result = nullptr;
2230 // See if it is a narrow oop array.
2231 if (adr_type->isa_aryptr()) {
2232 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2233 const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2234 null_free = adr_type->is_aryptr()->is_null_free();
2235 if (elem_type != nullptr && elem_type->is_loaded()) {
2236 // Sharpen the value type.
2237 result = elem_type;
2238 }
2239 }
2240 }
2241
2242 // The sharpened class might be unloaded if there is no class loader
2243 // constraint in place.
2244 if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2245 // Sharpen the value type.
2246 result = TypeOopPtr::make_from_klass(sharpened_klass);
2247 if (null_free) {
2248 result = result->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2249 }
2250 }
2251 if (result != nullptr) {
2252 #ifndef PRODUCT
2253 if (C->print_intrinsics() || C->print_inlining()) {
2254 tty->print(" from base type: "); adr_type->dump(); tty->cr();
2255 tty->print(" sharpened value: "); result->dump(); tty->cr();
2256 }
2257 #endif
2258 }
2259 return result;
2260 }
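// (Illustrative: an Unsafe getReference from a String[] element is sharpened
// here from plain Object to String, which can enable devirtualization on
// the loaded value.)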
2261
2262 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2263 switch (kind) {
2264 case Relaxed:
2265 return MO_UNORDERED;
2266 case Opaque:
2267 return MO_RELAXED;
2268 case Acquire:
2269 return MO_ACQUIRE;
2270 case Release:
2271 return MO_RELEASE;
2272 case Volatile:
2273 return MO_SEQ_CST;
2274 default:
2275 ShouldNotReachHere();
2276 return 0;
2277 }
2278 }
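// For example (per the intrinsic dispatch table earlier in this file):
// _getInt reaches inline_unsafe_access() with kind == Relaxed and therefore
// MO_UNORDERED, while _getIntVolatile arrives with kind == Volatile and
// maps to MO_SEQ_CST.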
2279
2280 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned, const bool is_flat) {
2281 if (callee()->is_static()) return false; // caller must have the capability!
2282 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2283 guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2284 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2285 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2286
2287 if (is_reference_type(type)) {
2288 decorators |= ON_UNKNOWN_OOP_REF;
2289 }
2290
2291 if (unaligned) {
2292 decorators |= C2_UNALIGNED;
2293 }
2294
2295 #ifndef PRODUCT
2296 {
2297 ResourceMark rm;
2298 // Check the signatures.
2299 ciSignature* sig = callee()->signature();
2300 #ifdef ASSERT
2301 if (!is_store) {
2302 // Object getReference(Object base, int/long offset), etc.
2303 BasicType rtype = sig->return_type()->basic_type();
2304 assert(rtype == type, "getter must return the expected value");
2305 assert(sig->count() == 2 || (is_flat && sig->count() == 3), "oop getter has 2 or 3 arguments");
2306 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2307 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2308 } else {
2309 // void putReference(Object base, int/long offset, Object x), etc.
2310 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2311 assert(sig->count() == 3 || (is_flat && sig->count() == 4), "oop putter has 3 arguments");
2312 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2313 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2314 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2315 assert(vtype == type, "putter must accept the expected value");
2316 }
2317 #endif // ASSERT
2318 }
2319 #endif //PRODUCT
2320
2321 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2322
2323 Node* receiver = argument(0); // type: oop
2324
2325 // Build address expression.
2326 Node* heap_base_oop = top();
2327
2328 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2329 Node* base = argument(1); // type: oop
2330 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2331 Node* offset = argument(2); // type: long
2332 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2333 // to be plain byte offsets, which are also the same as those accepted
2334 // by oopDesc::field_addr.
2335 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2336 "fieldOffset must be byte-scaled");
2337
2338 ciInlineKlass* inline_klass = nullptr;
2339 if (is_flat) {
2340 const TypeInstPtr* cls = _gvn.type(argument(4))->isa_instptr();
2341 if (cls == nullptr || cls->const_oop() == nullptr) {
2342 return false;
2343 }
2344 ciType* mirror_type = cls->const_oop()->as_instance()->java_mirror_type();
2345 if (!mirror_type->is_inlinetype()) {
2346 return false;
2347 }
2348 inline_klass = mirror_type->as_inline_klass();
2349 }
2350
2351 if (base->is_InlineType()) {
2352 InlineTypeNode* vt = base->as_InlineType();
2353 if (is_store) {
2354 if (!vt->is_allocated(&_gvn)) {
2355 return false;
2356 }
2357 base = vt->get_oop();
2358 } else {
2359 if (offset->is_Con()) {
2360 long off = find_long_con(offset, 0);
2361 ciInlineKlass* vk = vt->type()->inline_klass();
2362 if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2363 return false;
2364 }
2365
2366 ciField* field = vk->get_non_flat_field_by_offset(off);
2367 if (field != nullptr) {
2368 BasicType bt = type2field[field->type()->basic_type()];
2369 if (bt == T_ARRAY || bt == T_NARROWOOP) {
2370 bt = T_OBJECT;
2371 }
2372 if (bt == type && (!field->is_flat() || field->type() == inline_klass)) {
2373 Node* value = vt->field_value_by_offset(off, false);
2374 if (value->is_InlineType()) {
2375 value = value->as_InlineType()->adjust_scalarization_depth(this);
2376 }
2377 set_result(value);
2378 return true;
2379 }
2380 }
2381 }
2382 {
2383 // Re-execute the unsafe access if allocation triggers deoptimization.
2384 PreserveReexecuteState preexecs(this);
2385 jvms()->set_should_reexecute(true);
2386 vt = vt->buffer(this);
2387 }
2388 base = vt->get_oop();
2389 }
2390 }
2391
2392 // 32-bit machines ignore the high half!
2393 offset = ConvL2X(offset);
2394
2395 // Save state and restore on bailout
2396 uint old_sp = sp();
2397 SafePointNode* old_map = clone_map();
2398
2399 Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2400
2401 if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2402 if (type != T_OBJECT && (inline_klass == nullptr || !inline_klass->has_object_fields())) {
2403 decorators |= IN_NATIVE; // off-heap primitive access
2404 } else {
2405 set_map(old_map);
2406 set_sp(old_sp);
2407 return false; // off-heap oop accesses are not supported
2408 }
2409 } else {
2410 heap_base_oop = base; // on-heap or mixed access
2411 }
2412
2413 // Can base be null? Otherwise, always on-heap access.
2414 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2415
2416 if (!can_access_non_heap) {
2417 decorators |= IN_HEAP;
2418 }
2419
2420 Node* val = is_store ? argument(4 + (is_flat ? 1 : 0)) : nullptr;
2421
2422 const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2423 if (adr_type == TypePtr::NULL_PTR) {
2424 set_map(old_map);
2425 set_sp(old_sp);
2426 return false; // off-heap access with zero address
2427 }
2428
2429 // Try to categorize the address.
2430 Compile::AliasType* alias_type = C->alias_type(adr_type);
2431 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2432
2433 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2434 alias_type->adr_type() == TypeAryPtr::RANGE) {
2435 set_map(old_map);
2436 set_sp(old_sp);
2437 return false; // not supported
2438 }
2439
2440 bool mismatched = false;
2441 BasicType bt = T_ILLEGAL;
2442 ciField* field = nullptr;
2443 if (adr_type->isa_instptr()) {
2444 const TypeInstPtr* instptr = adr_type->is_instptr();
2445 ciInstanceKlass* k = instptr->instance_klass();
2446 int off = instptr->offset();
2447 if (instptr->const_oop() != nullptr &&
2448 k == ciEnv::current()->Class_klass() &&
2449 instptr->offset() >= (k->size_helper() * wordSize)) {
2450 k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2451 field = k->get_field_by_offset(off, true);
2452 } else {
2453 field = k->get_non_flat_field_by_offset(off);
2454 }
2455 if (field != nullptr) {
2456 bt = type2field[field->type()->basic_type()];
2457 }
2458 assert(bt == alias_type->basic_type() || is_flat, "should match");
2459 } else {
2460 bt = alias_type->basic_type();
2461 }
2462
2463 if (bt != T_ILLEGAL) {
2464 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2465 if (bt == T_BYTE && adr_type->isa_aryptr()) {
2466 // Alias type doesn't differentiate between byte[] and boolean[].
2467 // Use address type to get the element type.
2468 bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2469 }
2470 if (is_reference_type(bt, true)) {
2471 // accessing an array field with getReference is not a mismatch
2472 bt = T_OBJECT;
2473 }
2474 if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2475 // Don't intrinsify mismatched object accesses
2476 set_map(old_map);
2477 set_sp(old_sp);
2478 return false;
2479 }
2480 mismatched = (bt != type);
2481 } else if (alias_type->adr_type()->isa_oopptr()) {
2482 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2483 }
2484
2485 if (is_flat) {
2486 if (adr_type->isa_instptr()) {
2487 if (field == nullptr || field->type() != inline_klass) {
2488 mismatched = true;
2489 }
2490 } else if (adr_type->isa_aryptr()) {
2491 const Type* elem = adr_type->is_aryptr()->elem();
2492 if (!adr_type->is_flat() || elem->inline_klass() != inline_klass) {
2493 mismatched = true;
2494 }
2495 } else {
2496 mismatched = true;
2497 }
2498 if (is_store) {
2499 const Type* val_t = _gvn.type(val);
2500 if (!val_t->is_inlinetypeptr() || val_t->inline_klass() != inline_klass) {
2501 set_map(old_map);
2502 set_sp(old_sp);
2503 return false;
2504 }
2505 }
2506 }
2507
2508 destruct_map_clone(old_map);
2509 assert(!mismatched || is_flat || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2510
2511 if (mismatched) {
2512 decorators |= C2_MISMATCHED;
2513 }
2514
2515 // First guess at the value type.
2516 const Type *value_type = Type::get_const_basic_type(type);
2517
2518 // Figure out the memory ordering.
2519 decorators |= mo_decorator_for_access_kind(kind);
2520
2521 if (!is_store) {
2522 if (type == T_OBJECT && !is_flat) {
2523 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2524 if (tjp != nullptr) {
2525 value_type = tjp;
2526 }
2527 }
2528 }
2529
2530 receiver = null_check(receiver);
2531 if (stopped()) {
2532 return true;
2533 }
2534 // Heap pointers get a null-check from the interpreter,
2535 // as a courtesy. However, this is not guaranteed by Unsafe,
2536 // and it is not possible to fully distinguish unintended nulls
2537 // from intended ones in this API.
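// (E.g., getInt(null, absoluteAddress) is a legitimate off-heap access,
// while getInt(null, fieldOffset) is a bug; the arguments alone cannot
// distinguish the two.)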
2538
2539 if (!is_store) {
2540 Node* p = nullptr;
2541 // Try to constant fold a load from a constant field
2542
2543 if (heap_base_oop != top() && field != nullptr && field->is_constant() && !field->is_flat() && !mismatched) {
2544 // final or stable field
2545 p = make_constant_from_field(field, heap_base_oop);
2546 }
2547
2548 if (p == nullptr) { // Could not constant fold the load
2549 if (is_flat) {
2550 if (adr_type->isa_instptr() && !mismatched) {
2551 ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2552 int offset = adr_type->is_instptr()->offset();
2553 p = InlineTypeNode::make_from_flat(this, inline_klass, base, base, holder, offset, decorators);
2554 } else {
2555 p = InlineTypeNode::make_from_flat(this, inline_klass, base, adr, nullptr, 0, decorators);
2556 }
2557 } else {
2558 p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2559 const TypeOopPtr* ptr = value_type->make_oopptr();
2560 if (ptr != nullptr && ptr->is_inlinetypeptr()) {
2561 // Load a non-flattened inline type from memory
2562 p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass(), !ptr->maybe_null());
2563 }
2564 }
2565 // Normalize the value returned by getBoolean in the following cases
2566 if (type == T_BOOLEAN &&
2567 (mismatched ||
2568 heap_base_oop == top() || // - heap_base_oop is null or
2569 (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2570 // and the unsafe access is made to large offset
2571 // (i.e., larger than the maximum offset necessary for any
2572 // field access)
2573 ) {
2574 IdealKit ideal = IdealKit(this);
2575 #define __ ideal.
2576 IdealVariable normalized_result(ideal);
2577 __ declarations_done();
2578 __ set(normalized_result, p);
2579 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2580 __ set(normalized_result, ideal.ConI(1));
2581 ideal.end_if();
2582 final_sync(ideal);
2583 p = __ value(normalized_result);
2584 #undef __
2585 }
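// (Illustrative: a mismatched byte read of 0x02 through getBoolean() is
// normalized above so the result is always a canonical 0 or 1.)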
2586 }
2587 if (type == T_ADDRESS) {
2588 p = gvn().transform(new CastP2XNode(nullptr, p));
2589 p = ConvX2UL(p);
2590 }
2591 // The load node has the control of the preceding MemBarCPUOrder. All
2592 // following nodes will have the control of the MemBarCPUOrder inserted at
2593 // the end of this method. So, pushing the load onto the stack at a later
2594 // point is fine.
2595 set_result(p);
2596 } else {
2597 if (bt == T_ADDRESS) {
2598 // Repackage the long as a pointer.
2599 val = ConvL2X(val);
2600 val = gvn().transform(new CastX2PNode(val));
2601 }
2602 if (is_flat) {
2603 if (adr_type->isa_instptr() && !mismatched) {
2604 ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2605 int offset = adr_type->is_instptr()->offset();
2606 val->as_InlineType()->store_flat(this, base, base, holder, offset, decorators);
2607 } else {
2608 val->as_InlineType()->store_flat(this, base, adr, nullptr, 0, decorators);
2609 }
2610 } else {
2611 access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2612 }
2613 }
2614
2615 if (argument(1)->is_InlineType() && is_store) {
2616 InlineTypeNode* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(argument(1))->inline_klass());
2617 value = value->make_larval(this, false);
2618 replace_in_map(argument(1), value);
2619 }
2620
2621 return true;
2622 }
2623
2624 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2625 Node* receiver = argument(0);
2626 Node* value = argument(1);
2627 if (!value->is_InlineType()) {
2628 return false;
2629 }
2630
2631 receiver = null_check(receiver);
2632 if (stopped()) {
2633 return true;
2634 }
2635
2636 set_result(value->as_InlineType()->make_larval(this, true));
2637 return true;
2638 }
2639
2640 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2641 Node* receiver = argument(0);
2642 Node* buffer = argument(1);
2643 if (!buffer->is_InlineType()) {
2644 return false;
2645 }
2646 InlineTypeNode* vt = buffer->as_InlineType();
2647 if (!vt->is_allocated(&_gvn)) {
2648 return false;
2649 }
2650 // TODO 8239003 Why is this needed?
2651 if (AllocateNode::Ideal_allocation(vt->get_oop()) == nullptr) {
2652 return false;
2653 }
2654
2655 receiver = null_check(receiver);
2656 if (stopped()) {
2657 return true;
2658 }
2659
2660 set_result(vt->finish_larval(this));
2661 return true;
2662 }
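// (Illustrative usage sketch, assuming the Valhalla Unsafe API: a value is
// thawed with v = U.makePrivateBuffer(v), mutated through Unsafe put*
// calls, and sealed again with v = U.finishPrivateBuffer(v); the two
// intrinsics above model these larval-state transitions in the IR.)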
2663
2664 //----------------------------inline_unsafe_load_store----------------------------
2665 // This method serves a couple of different customers (depending on LoadStoreKind):
2666 //
2667 // LS_cmp_swap:
2668 //
2669 // boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2670 // boolean compareAndSetInt( Object o, long offset, int expected, int x);
2671 // boolean compareAndSetLong( Object o, long offset, long expected, long x);
2672 //
2673 // LS_cmp_swap_weak:
2674 //
2675 // boolean weakCompareAndSetReference( Object o, long offset, Object expected, Object x);
2676 // boolean weakCompareAndSetReferencePlain( Object o, long offset, Object expected, Object x);
2677 // boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2678 // boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2679 //
2680 // boolean weakCompareAndSetInt( Object o, long offset, int expected, int x);
2681 // boolean weakCompareAndSetIntPlain( Object o, long offset, int expected, int x);
2682 // boolean weakCompareAndSetIntAcquire( Object o, long offset, int expected, int x);
2683 // boolean weakCompareAndSetIntRelease( Object o, long offset, int expected, int x);
2849 }
2850 case LS_cmp_swap:
2851 case LS_cmp_swap_weak:
2852 case LS_get_add:
2853 break;
2854 default:
2855 ShouldNotReachHere();
2856 }
2857
2858 // Null check receiver.
2859 receiver = null_check(receiver);
2860 if (stopped()) {
2861 return true;
2862 }
2863
2864 int alias_idx = C->get_alias_index(adr_type);
2865
2866 if (is_reference_type(type)) {
2867 decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2868
2869 if (oldval != nullptr && oldval->is_InlineType()) {
2870 // Re-execute the unsafe access if allocation triggers deoptimization.
2871 PreserveReexecuteState preexecs(this);
2872 jvms()->set_should_reexecute(true);
2873 oldval = oldval->as_InlineType()->buffer(this)->get_oop();
2874 }
2875 if (newval != nullptr && newval->is_InlineType()) {
2876 // Re-execute the unsafe access if allocation triggers deoptimization.
2877 PreserveReexecuteState preexecs(this);
2878 jvms()->set_should_reexecute(true);
2879 newval = newval->as_InlineType()->buffer(this)->get_oop();
2880 }
2881
2882 // Transformation of a value which could be a null pointer (CastPP #null)
2883 // could be delayed during Parse (for example, in adjust_map_after_if()).
2884 // Execute transformation here to avoid barrier generation in such a case.
2885 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2886 newval = _gvn.makecon(TypePtr::NULL_PTR);
2887
2888 if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2889 // Refine the value to a null constant, when it is known to be null
2890 oldval = _gvn.makecon(TypePtr::NULL_PTR);
2891 }
2892 }
2893
2894 Node* result = nullptr;
2895 switch (kind) {
2896 case LS_cmp_exchange: {
2897 result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2898 oldval, newval, value_type, type, decorators);
2899 break;
2900 }
2901 case LS_cmp_swap_weak:
3048 Deoptimization::Action_make_not_entrant);
3049 }
3050 if (stopped()) {
3051 return true;
3052 }
3053 #endif //INCLUDE_JVMTI
3054
3055 Node* test = nullptr;
3056 if (LibraryCallKit::klass_needs_init_guard(kls)) {
3057 // Note: The argument might still be an illegal value like
3058 // Serializable.class or Object[].class. The runtime will handle it.
3059 // But we must make an explicit check for initialization.
3060 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3061 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3062 // can generate code to load it as an unsigned byte.
3063 Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
3064 Node* bits = intcon(InstanceKlass::fully_initialized);
3065 test = _gvn.transform(new SubINode(inst, bits));
3066 // The 'test' is non-zero if we need to take a slow path.
3067 }
3068 Node* obj = nullptr;
3069 const TypeInstKlassPtr* tkls = _gvn.type(kls)->isa_instklassptr();
3070 if (tkls != nullptr && tkls->instance_klass()->is_inlinetype()) {
3071 obj = InlineTypeNode::make_default(_gvn, tkls->instance_klass()->as_inline_klass())->buffer(this);
3072 } else {
3073 obj = new_instance(kls, test);
3074 }
3075 set_result(obj);
3076 return true;
3077 }
3078
3079 //------------------------inline_native_time_funcs--------------
3080 // inline code for System.currentTimeMillis() and System.nanoTime()
3081 // these have the same type and signature
3082 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3083 const TypeFunc* tf = OptoRuntime::void_long_Type();
3084 const TypePtr* no_memory_effects = nullptr;
3085 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3086 Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3087 #ifdef ASSERT
3088 Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3089 assert(value_top == top(), "second value must be top");
3090 #endif
3091 set_result(value);
3092 return true;
3093 }
3094
3830
3831 //------------------------inline_native_setCurrentThread------------------
3832 bool LibraryCallKit::inline_native_setCurrentThread() {
3833 assert(C->method()->changes_current_thread(),
3834 "method changes current Thread but is not annotated ChangesCurrentThread");
3835 Node* arr = argument(1);
3836 Node* thread = _gvn.transform(new ThreadLocalNode());
3837 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3838 Node* thread_obj_handle
3839 = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3840 thread_obj_handle = _gvn.transform(thread_obj_handle);
3841 const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3842 access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3843 JFR_ONLY(extend_setCurrentThread(thread, arr);)
3844 return true;
3845 }
3846
3847 const Type* LibraryCallKit::scopedValueCache_type() {
3848 ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3849 const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3850 const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS, /* stable= */ false, /* flat= */ false, /* not_flat= */ true, /* not_null_free= */ true);
3851
3852 // Because we create the scopedValue cache lazily we have to make the
3853 // type of the result BotPTR.
3854 bool xk = etype->klass_is_exact();
3855 const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, TypeAryPtr::Offset(0));
3856 return objects_type;
3857 }
3858
3859 Node* LibraryCallKit::scopedValueCache_helper() {
3860 Node* thread = _gvn.transform(new ThreadLocalNode());
3861 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3862 // We cannot use immutable_memory() because we might flip onto a
3863 // different carrier thread, at which point we'll need to use that
3864 // carrier thread's cache.
3865 // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3866 // TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3867 return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3868 }
3869
3870 //------------------------inline_native_scopedValueCache------------------
3871 bool LibraryCallKit::inline_native_scopedValueCache() {
3872 Node* cache_obj_handle = scopedValueCache_helper();
3873 const Type* objects_type = scopedValueCache_type();
3874 set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3875
3876 return true;
3877 }
3878
3879 //------------------------inline_native_setScopedValueCache------------------
3880 bool LibraryCallKit::inline_native_setScopedValueCache() {
3881 Node* arr = argument(0);
3882 Node* cache_obj_handle = scopedValueCache_helper();
3883 const Type* objects_type = scopedValueCache_type();
3884
3885 const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3886 access_store_at(nullptr, cache_obj_handle, adr_type, arr, objects_type, T_OBJECT, IN_NATIVE | MO_UNORDERED);
3887
3888 return true;
3889 }
3890
3891 //-----------------------load_klass_from_mirror_common-------------------------
3892 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3893 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3894 // and branch to the given path on the region.
3895 // If never_see_null, take an uncommon trap on null, so we can optimistically
3896 // compile for the non-null case.
3897 // If the region is null, force never_see_null = true.
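// For example (illustrative): Integer.TYPE (int.class) carries a null klass
// oop and takes the null path, while String.class yields the String Klass*
// and falls through.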
3898 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3899 bool never_see_null,
3900 RegionNode* region,
3901 int null_path,
3902 int offset) {
3903 if (region == nullptr) never_see_null = true;
3904 Node* p = basic_plus_adr(mirror, offset);
3905 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3906 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3907 Node* null_ctl = top();
3908 kls = null_check_oop(kls, &null_ctl, never_see_null);
3909 if (region != nullptr) {
3910 // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
3913 assert(null_ctl == top(), "no loose ends");
3914 }
3915 return kls;
3916 }
3917
3918 //--------------------(inline_native_Class_query helpers)---------------------
3919 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3920 // Fall through if (mods & mask) == bits, take the guard otherwise.
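// For example, inline_native_clone() above passes
// mask = JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER and
// bits = JVM_ACC_IS_CLONEABLE_FAST, so only cloneable objects without a
// finalizer fall through to the fast path.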
3921 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3922 // Branch around if the given klass has the given modifier bit set.
3923 // Like generate_guard, adds a new path onto the region.
3924 Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3925 Node* mods = make_load(nullptr, modp, TypeInt::INT, T_INT, MemNode::unordered);
3926 Node* mask = intcon(modifier_mask);
3927 Node* bits = intcon(modifier_bits);
3928 Node* mbit = _gvn.transform(new AndINode(mods, mask));
3929 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
3930 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3931 return generate_fair_guard(bol, region);
3932 }
3933
3934 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3935 return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3936 }
3937 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3938 return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3939 }
3940
3941 //-------------------------inline_native_Class_query-------------------
3942 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3943 const Type* return_type = TypeInt::BOOL;
3944 Node* prim_return_value = top(); // what happens if it's a primitive class?
3945 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3946 bool expect_prim = false; // most of these guys expect to work on refs
3947
3948 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3949
3950 Node* mirror = argument(0);
3951 Node* obj = top();
3952
3953 switch (id) {
4107
4108 case vmIntrinsics::_getClassAccessFlags:
4109 p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
4110 query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
4111 break;
4112
4113 default:
4114 fatal_unexpected_iid(id);
4115 break;
4116 }
4117
4118 // Fall-through is the normal case of a query to a real class.
4119 phi->init_req(1, query_value);
4120 region->init_req(1, control());
4121
4122 C->set_has_split_ifs(true); // Has chance for split-if optimization
4123 set_result(region, phi);
4124 return true;
4125 }
4126
4127
4128 //-------------------------inline_Class_cast-------------------
4129 bool LibraryCallKit::inline_Class_cast() {
4130 Node* mirror = argument(0); // Class
4131 Node* obj = argument(1);
4132 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4133 if (mirror_con == nullptr) {
4134 return false; // dead path (mirror->is_top()).
4135 }
4136 if (obj == nullptr || obj->is_top()) {
4137 return false; // dead path
4138 }
4139 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4140
4141 // First, see if Class.cast() can be folded statically.
4142 // java_mirror_type() returns non-null for compile-time Class constants.
4143 bool is_null_free_array = false;
4144 ciType* tm = mirror_con->java_mirror_type(&is_null_free_array);
4145 if (tm != nullptr && tm->is_klass() &&
4146 tp != nullptr) {
4147 if (!tp->is_loaded()) {
4148 // Don't use intrinsic when class is not loaded.
4149 return false;
4150 } else {
4151 const TypeKlassPtr* tklass = TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces);
4152 if (is_null_free_array) {
4153 tklass = tklass->is_aryklassptr()->cast_to_null_free();
4154 }
4155 int static_res = C->static_subtype_check(tklass, tp->as_klass_type());
4156 if (static_res == Compile::SSC_always_true) {
4157 // isInstance() is true - fold the code.
4158 set_result(obj);
4159 return true;
4160 } else if (static_res == Compile::SSC_always_false) {
4161 // Don't use intrinsic, have to throw ClassCastException.
4162 // If the reference is null, the non-intrinsic bytecode will
4163 // be optimized appropriately.
4164 return false;
4165 }
4166 }
4167 }
4168
4169 // Bailout intrinsic and do normal inlining if exception path is frequent.
4170 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4171 return false;
4172 }
4173
4174 // Generate dynamic checks.
4175   // Class.cast() is the Java implementation of the _checkcast bytecode.
4176 // Do checkcast (Parse::do_checkcast()) optimizations here.
4177
4178 mirror = null_check(mirror);
4179 // If mirror is dead, only null-path is taken.
4180 if (stopped()) {
4181 return true;
4182 }
4183
4184   // Failure paths: not a subtype, the mirror's klass ptr is nullptr (a primitive mirror), or an NPE.
4185 enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
4186 RegionNode* region = new RegionNode(PATH_LIMIT);
4187 record_for_igvn(region);
4188
4189 // Now load the mirror's klass metaobject, and null-check it.
4190 // If kls is null, we have a primitive mirror and
4191 // nothing is an instance of a primitive type.
4192 Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4193
4194 Node* res = top();
4195 Node* io = i_o();
4196 Node* mem = merged_memory();
4197 if (!stopped()) {
4198
4199 Node* bad_type_ctrl = top();
4200 // Do checkcast optimizations.
4201 res = gen_checkcast(obj, kls, &bad_type_ctrl);
4202 region->init_req(_bad_type_path, bad_type_ctrl);
4203 }
4204 if (region->in(_prim_path) != top() ||
4205 region->in(_bad_type_path) != top() ||
4206 region->in(_npe_path) != top()) {
4207 // Let Interpreter throw ClassCastException.
4208 PreserveJVMState pjvms(this);
4209 set_control(_gvn.transform(region));
4210 // Set IO and memory because gen_checkcast may override them when buffering inline types
4211 set_i_o(io);
4212 set_all_memory(mem);
4213 uncommon_trap(Deoptimization::Reason_intrinsic,
4214 Deoptimization::Action_maybe_recompile);
4215 }
4216 if (!stopped()) {
4217 set_result(res);
4218 }
4219 return true;
4220 }
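// Sketch (illustrative pseudocode, helper names invented) of the three
// outcomes of the static fold performed at the top of the method above:
//
//   switch (C->static_subtype_check(tklass, tp->as_klass_type())) {
//     case SSC_always_true:  set_result(obj);  // cast folds away entirely
//     case SSC_always_false: bail_out();       // let bytecode throw the CCE
//     default:               emit_dynamic_checkcast_with_uncommon_trap();
//   }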
4221
4222
4223 //--------------------------inline_native_subtype_check------------------------
4224 // This intrinsic takes the JNI calls out of the heart of
4225 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4226 bool LibraryCallKit::inline_native_subtype_check() {
4227 // Pull both arguments off the stack.
4228 Node* args[2]; // two java.lang.Class mirrors: superc, subc
4229 args[0] = argument(0);
4230 args[1] = argument(1);
4231 Node* klasses[2]; // corresponding Klasses: superk, subk
4232 klasses[0] = klasses[1] = top();
4233
4234 enum {
4235 // A full decision tree on {superc is prim, subc is prim}:
4236 _prim_0_path = 1, // {P,N} => false
4237 // {P,P} & superc!=subc => false
4238 _prim_same_path, // {P,P} & superc==subc => true
4239 _prim_1_path, // {N,P} => false
4240 _ref_subtype_path, // {N,N} & subtype check wins => true
4241 _both_ref_path, // {N,N} & subtype check loses => false
4242 PATH_LIMIT
4243 };
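// A rough model (illustrative, helper name invented) of the decision tree
// above, written over the two klass pointers; a null Klass* stands for a
// primitive mirror and `same_mirror` is the superc == subc identity test:
//
//   static bool is_assignable(Klass* superk, Klass* subk, bool same_mirror) {
//     if (superk == nullptr) return same_mirror;  // {P,P} identical prim only
//     if (subk == nullptr)   return false;        // {N,P}
//     return subk->is_subtype_of(superk);         // {N,N} real subtype check
//   }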
4244
4245 RegionNode* region = new RegionNode(PATH_LIMIT);
4246 RegionNode* prim_region = new RegionNode(2);
4247 Node* phi = new PhiNode(region, TypeInt::BOOL);
4248 record_for_igvn(region);
4249 record_for_igvn(prim_region);
4250
4251 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
4252 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4253 int class_klass_offset = java_lang_Class::klass_offset();
4254
4255 // First null-check both mirrors and load each mirror's klass metaobject.
4256 int which_arg;
4257 for (which_arg = 0; which_arg <= 1; which_arg++) {
4258 Node* arg = args[which_arg];
4259 arg = null_check(arg);
4260 if (stopped()) break;
4261 args[which_arg] = arg;
4262
4263 Node* p = basic_plus_adr(arg, class_klass_offset);
4264 Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
4265 klasses[which_arg] = _gvn.transform(kls);
4266 }
4267
4268 // Having loaded both klasses, test each for null.
4269 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4270 for (which_arg = 0; which_arg <= 1; which_arg++) {
4271 Node* kls = klasses[which_arg];
4272 Node* null_ctl = top();
4273 kls = null_check_oop(kls, &null_ctl, never_see_null);
4274 if (which_arg == 0) {
4275 prim_region->init_req(1, null_ctl);
4276 } else {
4277 region->init_req(_prim_1_path, null_ctl);
4278 }
4279 if (stopped()) break;
4280 klasses[which_arg] = kls;
4281 }
4282
4283 if (!stopped()) {
4284 // now we have two reference types, in klasses[0..1]
4285 Node* subk = klasses[1]; // the argument to isAssignableFrom
4286 Node* superk = klasses[0]; // the receiver
4287 region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4288 region->set_req(_ref_subtype_path, control());
4289 }
4290
4291 // If both operands are primitive (both klasses null), then
4292 // we must return true when they are identical primitives.
4293 // It is convenient to test this after the first null klass check.
4294 // This path is also used if superc is a value mirror.
4295 set_control(_gvn.transform(prim_region));
4296 if (!stopped()) {
4297 // Since superc is primitive, make a guard for the superc==subc case.
4298 Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4299 Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4300 generate_fair_guard(bol_eq, region);
4301 if (region->req() == PATH_LIMIT+1) {
4302 // A guard was added. If the added guard is taken, superc==subc.
4303 region->swap_edges(PATH_LIMIT, _prim_same_path);
4304 region->del_req(PATH_LIMIT);
4305 }
4306 region->set_req(_prim_0_path, control()); // Not equal after all.
4307 }
4308
4309 // these are the only paths that produce 'true':
4310 phi->set_req(_prim_same_path, intcon(1));
4311 phi->set_req(_ref_subtype_path, intcon(1));
4312
4313 // pull together the cases:
4314 assert(region->req() == PATH_LIMIT, "sane region");
4315 for (uint i = 1; i < region->req(); i++) {
4316 Node* ctl = region->in(i);
4317 if (ctl == nullptr || ctl == top()) {
4318 region->set_req(i, top());
4319 phi ->set_req(i, top());
4320 } else if (phi->in(i) == nullptr) {
4321 phi->set_req(i, intcon(0)); // all other paths produce 'false'
4322 }
4323 }
4324
4325 set_control(_gvn.transform(region));
4326 set_result(_gvn.transform(phi));
4327 return true;
4328 }
4329
4330 //---------------------generate_array_guard_common------------------------
4331 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {
4332
4333 if (stopped()) {
4334 return nullptr;
4335 }
4336
4337 // Like generate_guard, adds a new path onto the region.
4338 jint layout_con = 0;
4339 Node* layout_val = get_layout_helper(kls, layout_con);
4340 if (layout_val == nullptr) {
4341     bool query = false;
4342 switch(kind) {
4343 case ObjectArray: query = Klass::layout_helper_is_objArray(layout_con); break;
4344 case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
4345 case TypeArray: query = Klass::layout_helper_is_typeArray(layout_con); break;
4346 case AnyArray: query = Klass::layout_helper_is_array(layout_con); break;
4347 case NonArray: query = !Klass::layout_helper_is_array(layout_con); break;
4348 default:
4349 ShouldNotReachHere();
4350 }
4351 if (!query) {
4352 return nullptr; // never a branch
4353 } else { // always a branch
4354 Node* always_branch = control();
4355 if (region != nullptr)
4356 region->add_req(always_branch);
4357 set_control(top());
4358 return always_branch;
4359 }
4360 }
4361 unsigned int value = 0;
4362 BoolTest::mask btest = BoolTest::illegal;
4363 switch(kind) {
4364 case ObjectArray:
4365 case NonObjectArray: {
4366 value = Klass::_lh_array_tag_obj_value;
4367 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4368 btest = (kind == ObjectArray) ? BoolTest::eq : BoolTest::ne;
4369 break;
4370 }
4371 case TypeArray: {
4372 value = Klass::_lh_array_tag_type_value;
4373 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4374 btest = BoolTest::eq;
4375 break;
4376 }
4377 case AnyArray: value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
4378 case NonArray: value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
4379 default:
4380 ShouldNotReachHere();
4381 }
4382 // Now test the correct condition.
4383 jint nval = (jint)value;
4384 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4385 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4386 return generate_fair_guard(bol, region);
4387 }
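// Illustrative sketch of the layout-helper encoding these guards rely on
// (see klass.hpp): an array klass has a negative layout helper with the
// array tag in its topmost bits, so the tests above reduce to:
//
//   jint tag = lh >> Klass::_lh_array_tag_shift;   // sign-extending shift
//   bool is_obj_array  = (tag == Klass::_lh_array_tag_obj_value);
//   bool is_type_array = (tag == Klass::_lh_array_tag_type_value);
//   bool is_any_array  = (lh  <  Klass::_lh_neutral_value);
//   bool is_non_array  = (lh  >  Klass::_lh_neutral_value);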
4388
4389 //-----------------------inline_newNullRestrictedArray--------------------------
4390 // public static native Object[] newNullRestrictedArray(Class<?> componentType, int length);
4391 bool LibraryCallKit::inline_newNullRestrictedArray() {
4392 Node* componentType = argument(0);
4393 Node* length = argument(1);
4394
4395 const TypeInstPtr* tp = _gvn.type(componentType)->isa_instptr();
4396 if (tp != nullptr) {
4397 ciInstanceKlass* ik = tp->instance_klass();
4398 if (ik == C->env()->Class_klass()) {
4399 ciType* t = tp->java_mirror_type();
4400 if (t != nullptr && t->is_inlinetype()) {
4401 ciArrayKlass* array_klass = ciArrayKlass::make(t, true);
4402 if (array_klass->is_loaded() && array_klass->element_klass()->as_inline_klass()->is_initialized()) {
4403 const TypeAryKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass, Type::trust_interfaces)->is_aryklassptr();
4404 array_klass_type = array_klass_type->cast_to_null_free();
4405 Node* obj = new_array(makecon(array_klass_type), length, 0, nullptr, false); // no arguments to push
4406 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(obj);
4407 alloc->set_null_free();
4408 set_result(obj);
4409 assert(gvn().type(obj)->is_aryptr()->is_null_free(), "must be null-free");
4410 return true;
4411 }
4412 }
4413 }
4414 }
4415 return false;
4416 }
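// Fast-path preconditions of the intrinsic above, gathered as one sketch
// (all must hold at compile time, otherwise it bails out to the native
// method; the variable names are descriptive placeholders):
//
//   bool can_intrinsify = mirror_is_constant           // java_mirror_type()
//                      && component_is_inline_type     // t->is_inlinetype()
//                      && array_klass_is_loaded
//                      && element_klass_is_initialized;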
4417
4418 //-----------------------inline_native_newArray--------------------------
4419 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4420 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4421 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4422 Node* mirror;
4423 Node* count_val;
4424 if (uninitialized) {
4425 null_check_receiver();
4426 mirror = argument(1);
4427 count_val = argument(2);
4428 } else {
4429 mirror = argument(0);
4430 count_val = argument(1);
4431 }
4432
4433 mirror = null_check(mirror);
4434   // If mirror is dead, only the null path is taken.
4435 if (stopped()) return true;
4436
4437 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4438 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4439 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4545 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4546 { PreserveReexecuteState preexecs(this);
4547 jvms()->set_should_reexecute(true);
4548
4549 array_type_mirror = null_check(array_type_mirror);
4550 original = null_check(original);
4551
4552 // Check if a null path was taken unconditionally.
4553 if (stopped()) return true;
4554
4555 Node* orig_length = load_array_length(original);
4556
4557 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4558 klass_node = null_check(klass_node);
4559
4560 RegionNode* bailout = new RegionNode(1);
4561 record_for_igvn(bailout);
4562
4563 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4564 // Bail out if that is so.
4565     // An inline type array may have object fields that would require a
4566     // write barrier. Conservatively, go to the slow path.
4567 // TODO 8251971: Optimize for the case when flat src/dst are later found
4568 // to not contain oops (i.e., move this check to the macro expansion phase).
4569 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4570 const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
4571 const TypeKlassPtr* tklass = _gvn.type(klass_node)->is_klassptr();
4572 bool exclude_flat = UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
4573 // Can src array be flat and contain oops?
4574 (orig_t == nullptr || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
4575 // Can dest array be flat and contain oops?
4576 tklass->can_be_inline_array() && (!tklass->is_flat() || tklass->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->as_inline_klass()->contains_oops());
4577 Node* not_objArray = exclude_flat ? generate_non_objArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
4578 if (not_objArray != nullptr) {
4579 // Improve the klass node's type from the new optimistic assumption:
4580 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4581 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
4582 Node* cast = new CastPPNode(control(), klass_node, akls);
4583 klass_node = _gvn.transform(cast);
4584 }
4585
4586 // Bail out if either start or end is negative.
4587 generate_negative_guard(start, bailout, &start);
4588 generate_negative_guard(end, bailout, &end);
4589
4590 Node* length = end;
4591 if (_gvn.type(start) != TypeInt::ZERO) {
4592 length = _gvn.transform(new SubINode(end, start));
4593 }
4594
4595 // Bail out if length is negative.
4596     // Without this, new_array would throw
4597     // NegativeArraySizeException, but IllegalArgumentException is what
4598     // should be thrown instead.
4599 generate_negative_guard(length, bailout, &length);
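// Net effect of the three negative guards above (sketch): for
// copyOfRange(original, start, end) the fast path requires
//
//   0 <= start && 0 <= end && 0 <= end - start   (hence start <= end)
//
// Any violation lands on `bailout`, where the uncommon trap re-executes the
// call in the interpreter so the proper exception is thrown there.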
4600
4601 // Handle inline type arrays
4602 bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
4603 if (!stopped()) {
4604 // TODO JDK-8329224
4605 if (!orig_t->is_null_free()) {
4606 // Not statically known to be null free, add a check
4607 generate_fair_guard(null_free_array_test(original), bailout);
4608 }
4609 orig_t = _gvn.type(original)->isa_aryptr();
4610 if (orig_t != nullptr && orig_t->is_flat()) {
4611 // Src is flat, check that dest is flat as well
4612 if (exclude_flat) {
4613 // Dest can't be flat, bail out
4614 bailout->add_req(control());
4615 set_control(top());
4616 } else {
4617 generate_fair_guard(flat_array_test(klass_node, /* flat = */ false), bailout);
4618 }
4619 } else if (UseFlatArray && (orig_t == nullptr || !orig_t->is_not_flat()) &&
4620 // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
4621 ((!tklass->is_flat() && tklass->can_be_inline_array()) || !can_validate)) {
4622 // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
4623 // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
4624 generate_fair_guard(flat_array_test(load_object_klass(original)), bailout);
4625 if (orig_t != nullptr) {
4626 orig_t = orig_t->cast_to_not_flat();
4627 original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
4628 }
4629 }
4630 if (!can_validate) {
4631 // No validation. The subtype check emitted at macro expansion time will not go to the slow
4632       // path but call checkcast_arraycopy which cannot handle flat/null-free inline type arrays.
4633 // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
4634 generate_fair_guard(flat_array_test(klass_node), bailout);
4635 generate_fair_guard(null_free_array_test(original), bailout);
4636 }
4637 }
4638
4639 if (bailout->req() > 1) {
4640 PreserveJVMState pjvms(this);
4641 set_control(_gvn.transform(bailout));
4642 uncommon_trap(Deoptimization::Reason_intrinsic,
4643 Deoptimization::Action_maybe_recompile);
4644 }
4645
4646 if (!stopped()) {
4647 // How many elements will we copy from the original?
4648 // The answer is MinI(orig_length - start, length).
4649 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4650 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4651
4652 // Generate a direct call to the right arraycopy function(s).
4653 // We know the copy is disjoint but we might not know if the
4654 // oop stores need checking.
4655 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
4656 // This will fail a store-check if x contains any non-nulls.
4657
4658 // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
4661 // to the copyOf to be validated, including that the copy to the
4662 // new array won't trigger an ArrayStoreException. That subtype
4663 // check can be optimized if we know something on the type of
4664 // the input array from type speculation.
4665 if (_gvn.type(klass_node)->singleton()) {
4666 const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4667 const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4668
4669 int test = C->static_subtype_check(superk, subk);
4670 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4671 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4672 if (t_original->speculative_type() != nullptr) {
4673 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4674 }
4675 }
4676 }
4677
4678 bool validated = false;
4679 // Reason_class_check rather than Reason_intrinsic because we
4680 // want to intrinsify even if this traps.
4681 if (can_validate) {
4682 Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4683
4684 if (not_subtype_ctrl != top()) {
4685 PreserveJVMState pjvms(this);
4686 set_control(not_subtype_ctrl);
4687 uncommon_trap(Deoptimization::Reason_class_check,
4688 Deoptimization::Action_make_not_entrant);
4689 assert(stopped(), "Should be stopped");
4690 }
4691 validated = true;
4692 }
4693
4694 if (!stopped()) {
4695 newcopy = new_array(klass_node, length, 0); // no arguments to push
4696
4697 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
4698 load_object_klass(original), klass_node);
4699 if (!is_copyOfRange) {
4700 ac->set_copyof(validated);
4701 } else {
4747
4748 //-----------------------generate_method_call----------------------------
4749 // Use generate_method_call to make a slow-call to the real
4750 // method if the fast path fails. An alternative would be to
4751 // use a stub like OptoRuntime::slow_arraycopy_Java.
4752 // This only works for expanding the current library call,
4753 // not another intrinsic. (E.g., don't use this for making an
4754 // arraycopy call inside the copyOf intrinsic.)
4755 CallJavaNode*
4756 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4757 // When compiling the intrinsic method itself, do not use this technique.
4758 guarantee(callee() != C->method(), "cannot make slow-call to self");
4759
4760 ciMethod* method = callee();
4761 // ensure the JVMS we have will be correct for this call
4762 guarantee(method_id == method->intrinsic_id(), "must match");
4763
4764 const TypeFunc* tf = TypeFunc::make(method);
4765 if (res_not_null) {
4766 assert(tf->return_type() == T_OBJECT, "");
4767 const TypeTuple* range = tf->range_cc();
4768 const Type** fields = TypeTuple::fields(range->cnt());
4769 fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4770 const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4771 tf = TypeFunc::make(tf->domain_cc(), new_range);
4772 }
4773 CallJavaNode* slow_call;
4774 if (is_static) {
4775 assert(!is_virtual, "");
4776 slow_call = new CallStaticJavaNode(C, tf,
4777 SharedRuntime::get_resolve_static_call_stub(), method);
4778 } else if (is_virtual) {
4779 assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4780 int vtable_index = Method::invalid_vtable_index;
4781 if (UseInlineCaches) {
4782 // Suppress the vtable call
4783 } else {
4784       // hashCode and clone are not miranda methods,
4785 // so the vtable index is fixed.
4786 // No need to use the linkResolver to get it.
4787 vtable_index = method->vtable_index();
4788 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4789 "bad index %d", vtable_index);
4790 }
4791 slow_call = new CallDynamicJavaNode(tf,
4808 set_edges_for_java_call(slow_call);
4809 return slow_call;
4810 }
4811
4812
4813 /**
4814 * Build special case code for calls to hashCode on an object. This call may
4815 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4816 * slightly different code.
4817 */
4818 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4819 assert(is_static == callee()->is_static(), "correct intrinsic selection");
4820 assert(!(is_virtual && is_static), "either virtual, special, or static");
4821
4822 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4823
4824 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4825 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
4826 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4827 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4828 Node* obj = argument(0);
4829
4830 if (gvn().type(obj)->is_inlinetypeptr()) {
4831 return false;
4832 }
4833
4834 if (!is_static) {
4835 // Check for hashing null object
4836 obj = null_check_receiver();
4837 if (stopped()) return true; // unconditionally null
4838 result_reg->init_req(_null_path, top());
4839 result_val->init_req(_null_path, top());
4840 } else {
4841 // Do a null check, and return zero if null.
4842 // System.identityHashCode(null) == 0
4843 Node* null_ctl = top();
4844 obj = null_check_oop(obj, &null_ctl);
4845 result_reg->init_req(_null_path, null_ctl);
4846 result_val->init_req(_null_path, _gvn.intcon(0));
4847 }
4848
4849 // Unconditionally null? Then return right away.
4850 if (stopped()) {
4851 set_control( result_reg->in(_null_path));
4852 if (!stopped())
4853 set_result(result_val->in(_null_path));
4854 return true;
4855 }
4856
4857 // We only go to the fast case code if we pass a number of guards. The
4858 // paths which do not pass are accumulated in the slow_region.
4859 RegionNode* slow_region = new RegionNode(1);
4860 record_for_igvn(slow_region);
4861
4862 // If this is a virtual call, we generate a funny guard. We pull out
4863 // the vtable entry corresponding to hashCode() from the target object.
4864 // If the target method which we are calling happens to be the native
4865 // Object hashCode() method, we pass the guard. We do not need this
4866 // guard for non-virtual calls -- the caller is known to be the native
4867 // Object hashCode().
4868 if (is_virtual) {
4869 // After null check, get the object's klass.
4870 Node* obj_klass = load_object_klass(obj);
4871 generate_virtual_guard(obj_klass, slow_region);
4872 }
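// Sketch of the "funny guard" (illustrative; the index and method names are
// assumptions, see generate_virtual_guard for the real IR):
//
//   Method* target = obj_klass->method_at_vtable(hashCode_vtable_index);
//   if (target != Object_hashCode_method) goto slow_region;  // overridden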
4873
4874 // Get the header out of the object, use LoadMarkNode when available
4875 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4876   // The control of the load must be null. Otherwise, the load could float
4877   // above the null check once the castPP is removed.
4878 Node* no_ctrl = nullptr;
4879 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4880
4881 // Test the header to see if it is safe to read w.r.t. locking.
4882   // This also serves as a guard against inline types.
4883 Node *lock_mask = _gvn.MakeConX(markWord::inline_type_mask_in_place);
4884 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4885 if (LockingMode == LM_LIGHTWEIGHT) {
4886 Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
4887 Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4888 Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4889
4890 generate_slow_guard(test_monitor, slow_region);
4891 } else {
4892 Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
4893 Node *chk_unlocked = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
4894 Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
4895
4896 generate_slow_guard(test_not_unlocked, slow_region);
4897 }
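// Sketch of the header test above for the legacy-locking case, using the
// markWord encodings referenced by the code:
//
//   intptr_t masked = header & markWord::inline_type_mask_in_place;
//   bool take_slow  = (masked != markWord::unlocked_value);
//
// Under LM_LIGHTWEIGHT the slow path is instead taken only when the masked
// bits denote an inflated monitor (masked == markWord::monitor_value).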
4898
4899 // Get the hash value and check to see that it has been properly assigned.
4900 // We depend on hash_mask being at most 32 bits and avoid the use of
4901 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4902 // vm: see markWord.hpp.
4903 Node *hash_mask = _gvn.intcon(markWord::hash_mask);
4937 // this->control() comes from set_results_for_java_call
4938 result_reg->init_req(_slow_path, control());
4939 result_val->init_req(_slow_path, slow_result);
4940 result_io ->set_req(_slow_path, i_o());
4941 result_mem ->set_req(_slow_path, reset_memory());
4942 }
4943
4944 // Return the combined state.
4945 set_i_o( _gvn.transform(result_io) );
4946 set_all_memory( _gvn.transform(result_mem));
4947
4948 set_result(result_reg, result_val);
4949 return true;
4950 }
4951
4952 //---------------------------inline_native_getClass----------------------------
4953 // public final native Class<?> java.lang.Object.getClass();
4954 //
4955 // Build special case code for calls to getClass on an object.
4956 bool LibraryCallKit::inline_native_getClass() {
4957 Node* obj = argument(0);
4958 if (obj->is_InlineType()) {
4959 const Type* t = _gvn.type(obj);
4960 if (t->maybe_null()) {
4961 null_check(obj);
4962 }
4963 set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
4964 return true;
4965 }
4966 obj = null_check_receiver();
4967 if (stopped()) return true;
4968 set_result(load_mirror_from_klass(load_object_klass(obj)));
4969 return true;
4970 }
4971
4972 //-----------------inline_native_Reflection_getCallerClass---------------------
4973 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4974 //
4975 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4976 //
4977 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4978 // in that it must skip particular security frames and checks for
4979 // caller sensitive methods.
4980 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4981 #ifndef PRODUCT
4982 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4983 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4984 }
4985 #endif
4986
5247 if (C->get_alias_index(src_type) == C->get_alias_index(dst_type)) {
5248 flags |= RC_NARROW_MEM; // narrow in memory
5249 }
5250 }
5251
5252 // Call it. Note that the length argument is not scaled.
5253 make_runtime_call(flags,
5254 OptoRuntime::fast_arraycopy_Type(),
5255 StubRoutines::unsafe_arraycopy(),
5256 "unsafe_arraycopy",
5257 dst_type,
5258 src_addr, dst_addr, size XTOP);
5259
5260 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
5261
5262 return true;
5263 }
5264
5265 #undef XTOP
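// In effect, the runtime call above performs an unscaled raw byte copy
// (sketch; the stub copies conjoint bytes, so memmove is the right analogy):
//
//   memmove((void*)dst_addr, (void*)src_addr, (size_t)size);  // size in bytes
//
// and the store that follows clears the thread's doing_unsafe_access flag so
// a fault inside the copy can be attributed to the Unsafe caller.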
5266
5267 // TODO 8325106 Remove this and corresponding tests. Flatness is not a property of the Class anymore with JEP 401.
5268 //----------------------inline_unsafe_isFlatArray------------------------
5269 // public native boolean Unsafe.isFlatArray(Class<?> arrayClass);
5270 // This intrinsic exploits assumptions made by the native implementation
5271 // (arrayClass is neither null nor primitive) to avoid unnecessary null checks.
5272 bool LibraryCallKit::inline_unsafe_isFlatArray() {
5273 Node* cls = argument(1);
5274 Node* p = basic_plus_adr(cls, java_lang_Class::klass_offset());
5275 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p,
5276 TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT));
5277 Node* result = flat_array_test(kls);
5278 set_result(result);
5279 return true;
5280 }
5281
5282 //------------------------copy_to_clone----------------------------------
5283 // Helper function for inline_native_clone.
5284 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5285 assert(obj_size != nullptr, "");
5286 Node* raw_obj = alloc_obj->in(1);
5287 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5288
5289 AllocateNode* alloc = nullptr;
5290 if (ReduceBulkZeroing) {
5291 // We will be completely responsible for initializing this object -
5292 // mark Initialize node as complete.
5293 alloc = AllocateNode::Ideal_allocation(alloc_obj);
5294     // The object was just allocated - there should not be any stores!
5295 guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
5296 // Mark as complete_with_arraycopy so that on AllocateNode
5297 // expansion, we know this AllocateNode is initialized by an array
5298 // copy and a StoreStore barrier exists after the array copy.
5299 alloc->initialization()->set_complete_with_arraycopy();
5300 }
5301
5326 // not cloneable or finalizer => slow path to out-of-line Object.clone
5327 //
5328 // The general case has two steps, allocation and copying.
5329 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5330 //
5331 // Copying also has two cases, oop arrays and everything else.
5332 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5333 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5334 //
5335 // These steps fold up nicely if and when the cloned object's klass
5336 // can be sharply typed as an object array, a type array, or an instance.
5337 //
5338 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5339 PhiNode* result_val;
5340
5341 // Set the reexecute bit for the interpreter to reexecute
5342 // the bytecode that invokes Object.clone if deoptimization happens.
5343 { PreserveReexecuteState preexecs(this);
5344 jvms()->set_should_reexecute(true);
5345
5346 Node* obj = argument(0);
5347 obj = null_check_receiver();
5348 if (stopped()) return true;
5349
5350 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5351 if (obj_type->is_inlinetypeptr()) {
5352 // If the object to clone is an inline type, we can simply return it (i.e. a nop) since inline types have
5353 // no identity.
5354 set_result(obj);
5355 return true;
5356 }
5357
5358 // If we are going to clone an instance, we need its exact type to
5359 // know the number and types of fields to convert the clone to
5360 // loads/stores. Maybe a speculative type can help us.
5361 if (!obj_type->klass_is_exact() &&
5362 obj_type->speculative_type() != nullptr &&
5363 obj_type->speculative_type()->is_instance_klass() &&
5364 !obj_type->speculative_type()->is_inlinetype()) {
5365 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5366 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5367 !spec_ik->has_injected_fields()) {
5368 if (!obj_type->isa_instptr() ||
5369 obj_type->is_instptr()->instance_klass()->has_subklass()) {
5370 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5371 }
5372 }
5373 }
5374
5375 // Conservatively insert a memory barrier on all memory slices.
5376 // Do not let writes into the original float below the clone.
5377 insert_mem_bar(Op_MemBarCPUOrder);
5378
5379 // paths into result_reg:
5380 enum {
5381 _slow_path = 1, // out-of-line call to clone method (virtual or not)
5382 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
5383 _array_path, // plain array allocation, plus arrayof_long_arraycopy
5384 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
5385 PATH_LIMIT
5386 };
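// Dispatch sketch for the paths above (illustrative):
//
//   if (obj is an oop array)          -> new_array + arrayof_oop_arraycopy
//   else if (obj is any other array)  -> new_array + raw word copy
//   else if (cloneable instance,
//            no finalizer)            -> new_instance + raw word copy
//   else                              -> _slow_path: out-of-line Object.clone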
5387 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5388 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5389 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
5390 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5391 record_for_igvn(result_reg);
5392
5393 Node* obj_klass = load_object_klass(obj);
5394 // We only go to the fast case code if we pass a number of guards.
5395 // The paths which do not pass are accumulated in the slow_region.
5396 RegionNode* slow_region = new RegionNode(1);
5397 record_for_igvn(slow_region);
5398
5399 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
5400 if (array_ctl != nullptr) {
5401 // It's an array.
5402 PreserveJVMState pjvms(this);
5403 set_control(array_ctl);
5404
5405 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5406 const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
5407 if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
5408 obj_type->can_be_inline_array() &&
5409 (ary_ptr == nullptr || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
5410       // A flat inline type array may have object fields that would require
5411       // a write barrier. Conservatively, go to the slow path.
5412 generate_fair_guard(flat_array_test(obj_klass), slow_region);
5413 }
5414
5415 if (!stopped()) {
5416 Node* obj_length = load_array_length(obj);
5417 Node* array_size = nullptr; // Size of the array without object alignment padding.
5418 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5419
5420 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5421 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5422 // If it is an oop array, it requires very special treatment,
5423 // because gc barriers are required when accessing the array.
5424 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5425 if (is_obja != nullptr) {
5426 PreserveJVMState pjvms2(this);
5427 set_control(is_obja);
5428 // Generate a direct call to the right arraycopy function(s).
5429 // Clones are always tightly coupled.
5430 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5431 ac->set_clone_oop_array();
5432 Node* n = _gvn.transform(ac);
5433 assert(n == ac, "cannot disappear");
5434 ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5435
5436 result_reg->init_req(_objArray_path, control());
5437 result_val->init_req(_objArray_path, alloc_obj);
5438 result_i_o ->set_req(_objArray_path, i_o());
5439 result_mem ->set_req(_objArray_path, reset_memory());
5440 }
5441 }
5442 // Otherwise, there are no barriers to worry about.
5443 // (We can dispense with card marks if we know the allocation
5444 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
5445 // causes the non-eden paths to take compensating steps to
5446 // simulate a fresh allocation, so that no further
5447 // card marks are required in compiled code to initialize
5448 // the object.)
5449
5450 if (!stopped()) {
5451 copy_to_clone(obj, alloc_obj, array_size, true);
5452
5453 // Present the results of the copy.
5454 result_reg->init_req(_array_path, control());
5455 result_val->init_req(_array_path, alloc_obj);
5456 result_i_o ->set_req(_array_path, i_o());
5457 result_mem ->set_req(_array_path, reset_memory());
5458 }
5459 }
5460 }
5461
5462 if (!stopped()) {
5463 // It's an instance (we did array above). Make the slow-path tests.
5464 // If this is a virtual call, we generate a funny guard. We grab
5465 // the vtable entry corresponding to clone() from the target object.
5466 // If the target method which we are calling happens to be the
5467 // Object clone() method, we pass the guard. We do not need this
5468 // guard for non-virtual calls; the caller is known to be the native
5469 // Object clone().
5470 if (is_virtual) {
5471 generate_virtual_guard(obj_klass, slow_region);
5472 }
5473
5474 // The object must be easily cloneable and must not have a finalizer.
5475 // Both of these conditions may be checked in a single test.
5476 // We could optimize the test further, but we don't care.
5477 generate_access_flags_guard(obj_klass,
5478 // Test both conditions:
5479 JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
5480 // Must be cloneable but not finalizer:
5481 JVM_ACC_IS_CLONEABLE_FAST,
5573 set_jvms(sfpt->jvms());
5574 _reexecute_sp = jvms()->sp();
5575
5576 return saved_jvms;
5577 }
5578 }
5579 }
5580 return nullptr;
5581 }
5582
5583 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5584 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5585 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5586 JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5587 uint size = alloc->req();
5588 SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5589 old_jvms->set_map(sfpt);
5590 for (uint i = 0; i < size; i++) {
5591 sfpt->init_req(i, alloc->in(i));
5592 }
5593 int adjustment = 1;
5594 // TODO 8325106 why can't we check via the type of the const klass node?
5595 if (alloc->is_null_free()) {
5596 // A null-free, tightly coupled array allocation can only come from LibraryCallKit::inline_newNullRestrictedArray
5597 // which requires both the component type and the array length on stack for re-execution. Re-create and push
5598 // the component type.
5599 ciArrayKlass* klass = alloc->in(AllocateNode::KlassNode)->bottom_type()->is_aryklassptr()->exact_klass()->as_array_klass();
5600 ciInstance* instance = klass->component_mirror_instance();
5601 const TypeInstPtr* t_instance = TypeInstPtr::make(instance);
5602 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), makecon(t_instance));
5603 adjustment++;
5604 }
5605 // re-push array length for deoptimization
5606 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment - 1, alloc->in(AllocateNode::ALength));
5607 old_jvms->set_sp(old_jvms->sp() + adjustment);
5608 old_jvms->set_monoff(old_jvms->monoff() + adjustment);
5609 old_jvms->set_scloff(old_jvms->scloff() + adjustment);
5610 old_jvms->set_endoff(old_jvms->endoff() + adjustment);
5611 old_jvms->set_should_reexecute(true);
5612
5613 sfpt->set_i_o(map()->i_o());
5614 sfpt->set_memory(map()->memory());
5615 sfpt->set_control(map()->control());
5616 return sfpt;
5617 }
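// Offset bookkeeping sketch for the function above: inserting `adjustment`
// extra stack slots at the old expression-stack top shifts every later
// JVMState section by the same amount, so after the updates
//
//   sp'     = sp     + adjustment   // re-pushed [component mirror,] length
//   monoff' = monoff + adjustment   // monitors
//   scloff' = scloff + adjustment   // scalarized objects
//   endoff' = endoff + adjustment   // end of the frame state
//
// and re-execution in the interpreter sees the allocation's operands again.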
5618
5619 // In case of a deoptimization, we restart execution at the
5620 // allocation, allocating a new array. We would leave an uninitialized
5621 // array in the heap that GCs wouldn't expect. Move the allocation
5622 // after the traps so we don't allocate the array if we
5623 // deoptimize. This is possible because tightly_coupled_allocation()
5624 // guarantees there's no observer of the allocated array at this point
5625 // and the control flow is simple enough.
5626 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5627 int saved_reexecute_sp, uint new_idx) {
5628 if (saved_jvms_before_guards != nullptr && !stopped()) {
5629 replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5630
5631 assert(alloc != nullptr, "only with a tightly coupled allocation");
5632 // restore JVM state to the state at the arraycopy
5633 saved_jvms_before_guards->map()->set_control(map()->control());
5634 assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5635 assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5636 // If we've improved the types of some nodes (null check) while
5637 // emitting the guards, propagate them to the current state
5638 map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5639 set_jvms(saved_jvms_before_guards);
5640 _reexecute_sp = saved_reexecute_sp;
5641
5642 // Remove the allocation from above the guards
5643 CallProjections* callprojs = alloc->extract_projections(true);
5644 InitializeNode* init = alloc->initialization();
5645 Node* alloc_mem = alloc->in(TypeFunc::Memory);
5646 C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5647 C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5648
5649 // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5650 // the allocation (i.e. is only valid if the allocation succeeds):
5651 // 1) replace CastIINode with AllocateArrayNode's length here
5652 // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5653 //
5654     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
5655     // a new separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy).
5656 Node* init_control = init->proj_out(TypeFunc::Control);
5657 Node* alloc_length = alloc->Ideal_length();
5658 #ifdef ASSERT
5659 Node* prev_cast = nullptr;
5660 #endif
5661 for (uint i = 0; i < init_control->outcnt(); i++) {
5662 Node* init_out = init_control->raw_out(i);
5663 if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5664 #ifdef ASSERT
5665 if (prev_cast == nullptr) {
5666           prev_cast = init_out;
5667         } else {
5668           if (prev_cast->cmp(*init_out) == false) {
5669 prev_cast->dump();
5670 init_out->dump();
5671 assert(false, "not equal CastIINode");
5672 }
5673 }
5674 #endif
5675 C->gvn_replace_by(init_out, alloc_length);
5676 }
5677 }
5678 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5679
5680 // move the allocation here (after the guards)
5681 _gvn.hash_delete(alloc);
5682 alloc->set_req(TypeFunc::Control, control());
5683 alloc->set_req(TypeFunc::I_O, i_o());
5684 Node *mem = reset_memory();
5685 set_all_memory(mem);
5686 alloc->set_req(TypeFunc::Memory, mem);
5687 set_control(init->proj_out_or_null(TypeFunc::Control));
5688 set_i_o(callprojs->fallthrough_ioproj);
5689
5690 // Update memory as done in GraphKit::set_output_for_allocation()
5691 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5692 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5693 if (ary_type->isa_aryptr() && length_type != nullptr) {
5694 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5695 }
5696 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5697 int elemidx = C->get_alias_index(telemref);
5698 set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5699 set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5700
5701 Node* allocx = _gvn.transform(alloc);
5702 assert(allocx == alloc, "where has the allocation gone?");
5703 assert(dest->is_CheckCastPP(), "not an allocation result?");
5704
5705 _gvn.hash_delete(dest);
5706 dest->set_req(0, control());
5707 Node* destx = _gvn.transform(dest);
5708 assert(destx == dest, "where has the allocation result gone?");
5978 top_src = src_type->isa_aryptr();
5979 has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5980 src_spec = true;
5981 }
5982 if (!has_dest) {
5983 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5984 dest_type = _gvn.type(dest);
5985 top_dest = dest_type->isa_aryptr();
5986 has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5987 dest_spec = true;
5988 }
5989 }
5990 }
5991
5992 if (has_src && has_dest && can_emit_guards) {
5993 BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5994 BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5995 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5996 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5997
5998 if (src_elem == dest_elem && top_src->is_flat() == top_dest->is_flat() && src_elem == T_OBJECT) {
5999 // If both arrays are object arrays then having the exact types
6000 // for both will remove the need for a subtype check at runtime
6001 // before the call and may make it possible to pick a faster copy
6002 // routine (without a subtype check on every element)
6003 // Do we have the exact type of src?
6004 bool could_have_src = src_spec;
6005 // Do we have the exact type of dest?
6006 bool could_have_dest = dest_spec;
6007 ciKlass* src_k = nullptr;
6008 ciKlass* dest_k = nullptr;
6009 if (!src_spec) {
6010 src_k = src_type->speculative_type_not_null();
6011 if (src_k != nullptr && src_k->is_array_klass()) {
6012 could_have_src = true;
6013 }
6014 }
6015 if (!dest_spec) {
6016 dest_k = dest_type->speculative_type_not_null();
6017 if (dest_k != nullptr && dest_k->is_array_klass()) {
6018 could_have_dest = true;
6019 }
6020 }
6021 if (could_have_src && could_have_dest) {
6022 // If we can have both exact types, emit the missing guards
6023 if (could_have_src && !src_spec) {
6024 src = maybe_cast_profiled_obj(src, src_k, true);
6025 src_type = _gvn.type(src);
6026 top_src = src_type->isa_aryptr();
6027 }
6028 if (could_have_dest && !dest_spec) {
6029 dest = maybe_cast_profiled_obj(dest, dest_k, true);
6030 dest_type = _gvn.type(dest);
6031 top_dest = dest_type->isa_aryptr();
6032 }
6033 }
6034 }
6035 }
6036
6037 ciMethod* trap_method = method();
6038 int trap_bci = bci();
6039 if (saved_jvms_before_guards != nullptr) {
6040 trap_method = alloc->jvms()->method();
6041 trap_bci = alloc->jvms()->bci();
6042 }
6043
6044 bool negative_length_guard_generated = false;
6045
6046 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
6047 can_emit_guards && !src->is_top() && !dest->is_top()) {
6048     // validate arguments: enables transformation of the ArrayCopyNode
6049 validated = true;
6050
6051 RegionNode* slow_region = new RegionNode(1);
6052 record_for_igvn(slow_region);
6053
6054 // (1) src and dest are arrays.
6055 generate_non_array_guard(load_object_klass(src), slow_region);
6056 generate_non_array_guard(load_object_klass(dest), slow_region);
6057
6058 // (2) src and dest arrays must have elements of the same BasicType
6059 // done at macro expansion or at Ideal transformation time
6060
6061 // (4) src_offset must not be negative.
6062 generate_negative_guard(src_offset, slow_region);
6063
6064 // (5) dest_offset must not be negative.
6065 generate_negative_guard(dest_offset, slow_region);
6066
6067 // (7) src_offset + length must not exceed length of src.
6070 slow_region);
6071
6072 // (8) dest_offset + length must not exceed length of dest.
6073 generate_limit_guard(dest_offset, length,
6074 load_array_length(dest),
6075 slow_region);
6076
6077 // (6) length must not be negative.
6078 // This is also checked in generate_arraycopy() during macro expansion, but
6079 // we also have to check it here for the case where the ArrayCopyNode will
6080 // be eliminated by Escape Analysis.
6081 if (EliminateAllocations) {
6082 generate_negative_guard(length, slow_region);
6083 negative_length_guard_generated = true;
6084 }
6085
6086 // (9) each element of an oop array must be assignable
6087 Node* dest_klass = load_object_klass(dest);
6088 if (src != dest) {
6089 Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
6090 slow_region->add_req(not_subtype_ctrl);
6091 }
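// Taken together, the guards above validate (sketch; plain arithmetic over
// the runtime values, with the standalone length guard only emitted under
// EliminateAllocations):
//
//   src and dest are arrays
//   && 0 <= src_offset && 0 <= dest_offset && 0 <= length
//   && src_offset  + length <= src.length
//   && dest_offset + length <= dest.length
//   && (src == dest || src_klass is a subtype of dest_klass)
//
// Any failing condition accumulates into slow_region and deoptimizes.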
6092
6093 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
6094 const Type* toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
6095 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
6096 src_type = _gvn.type(src);
6097 top_src = src_type->isa_aryptr();
6098
6099 // Handle flat inline type arrays (null-free arrays are handled by the subtype check above)
6100 if (!stopped() && UseFlatArray) {
6101 // If dest is flat, src must be flat as well (guaranteed by src <: dest check). Handle flat src here.
6102 assert(top_dest == nullptr || !top_dest->is_flat() || top_src->is_flat(), "src array must be flat");
6103 if (top_src != nullptr && top_src->is_flat()) {
6104 // Src is flat, check that dest is flat as well
6105 if (top_dest != nullptr && !top_dest->is_flat()) {
6106 generate_fair_guard(flat_array_test(dest_klass, /* flat = */ false), slow_region);
6107 // Since dest is flat and src <: dest, dest must have the same type as src.
6108 top_dest = top_src->cast_to_exactness(false);
6109 assert(top_dest->is_flat(), "dest must be flat");
6110 dest = _gvn.transform(new CheckCastPPNode(control(), dest, top_dest));
6111 }
6112 } else if (top_src == nullptr || !top_src->is_not_flat()) {
6113 // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
6114 // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
6115 assert(top_dest == nullptr || !top_dest->is_flat(), "dest array must not be flat");
6116 generate_fair_guard(flat_array_test(src), slow_region);
6117 if (top_src != nullptr) {
6118 top_src = top_src->cast_to_not_flat();
6119 src = _gvn.transform(new CheckCastPPNode(control(), src, top_src));
6120 }
6121 }
6122 }
6123
6124 {
6125 PreserveJVMState pjvms(this);
6126 set_control(_gvn.transform(slow_region));
6127 uncommon_trap(Deoptimization::Reason_intrinsic,
6128 Deoptimization::Action_make_not_entrant);
6129 assert(stopped(), "Should be stopped");
6130 }
6131 arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
6132 }
6133
6134 if (stopped()) {
6135 return true;
6136 }
6137
6138 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6139 // Create LoadRange and LoadKlass nodes for use during macro expansion here
6140 // so the compiler has a chance to eliminate them: during macro expansion,
6141 // we have to set their control (CastPP nodes are eliminated).
6142 load_object_klass(src), load_object_klass(dest),
6143 load_array_length(src), load_array_length(dest));
6144
6145 ac->set_arraycopy(validated);
6146
6147 Node* n = _gvn.transform(ac);
6148 if (n == ac) {
6149 ac->connect_outputs(this);
6150 } else {