7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "ci/ciUtilities.inline.hpp"
28 #include "classfile/vmIntrinsics.hpp"
29 #include "compiler/compileBroker.hpp"
30 #include "compiler/compileLog.hpp"
31 #include "gc/shared/barrierSet.hpp"
32 #include "jfr/support/jfrIntrinsics.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "oops/klass.inline.hpp"
35 #include "oops/objArrayKlass.hpp"
36 #include "opto/addnode.hpp"
37 #include "opto/arraycopynode.hpp"
38 #include "opto/c2compiler.hpp"
39 #include "opto/castnode.hpp"
40 #include "opto/cfgnode.hpp"
41 #include "opto/convertnode.hpp"
42 #include "opto/countbitsnode.hpp"
43 #include "opto/idealKit.hpp"
44 #include "opto/library_call.hpp"
45 #include "opto/mathexactnode.hpp"
46 #include "opto/mulnode.hpp"
308 case vmIntrinsics::_indexOfIL: return inline_string_indexOfI(StrIntrinsicNode::LL);
309 case vmIntrinsics::_indexOfIU: return inline_string_indexOfI(StrIntrinsicNode::UU);
310 case vmIntrinsics::_indexOfIUL: return inline_string_indexOfI(StrIntrinsicNode::UL);
311 case vmIntrinsics::_indexOfU_char: return inline_string_indexOfChar(StrIntrinsicNode::U);
312 case vmIntrinsics::_indexOfL_char: return inline_string_indexOfChar(StrIntrinsicNode::L);
313
314 case vmIntrinsics::_equalsL: return inline_string_equals(StrIntrinsicNode::LL);
315
316 case vmIntrinsics::_vectorizedHashCode: return inline_vectorizedHashCode();
317
318 case vmIntrinsics::_toBytesStringU: return inline_string_toBytesU();
319 case vmIntrinsics::_getCharsStringU: return inline_string_getCharsU();
320 case vmIntrinsics::_getCharStringU: return inline_string_char_access(!is_store);
321 case vmIntrinsics::_putCharStringU: return inline_string_char_access( is_store);
322
323 case vmIntrinsics::_compressStringC:
324 case vmIntrinsics::_compressStringB: return inline_string_copy( is_compress);
325 case vmIntrinsics::_inflateStringC:
326 case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);
327
328 case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false);
329 case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
330 case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false);
331 case vmIntrinsics::_getShort: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, false);
332 case vmIntrinsics::_getChar: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, false);
333 case vmIntrinsics::_getInt: return inline_unsafe_access(!is_store, T_INT, Relaxed, false);
334 case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false);
335 case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false);
336 case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);
337
338 case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false);
339 case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
340 case vmIntrinsics::_putByte: return inline_unsafe_access( is_store, T_BYTE, Relaxed, false);
341 case vmIntrinsics::_putShort: return inline_unsafe_access( is_store, T_SHORT, Relaxed, false);
342 case vmIntrinsics::_putChar: return inline_unsafe_access( is_store, T_CHAR, Relaxed, false);
343 case vmIntrinsics::_putInt: return inline_unsafe_access( is_store, T_INT, Relaxed, false);
344 case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false);
345 case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false);
346 case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false);
347
348 case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
349 case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
350 case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
351 case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
352 case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_store, T_CHAR, Volatile, false);
353 case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_store, T_INT, Volatile, false);
354 case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_store, T_LONG, Volatile, false);
355 case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_store, T_FLOAT, Volatile, false);
356 case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_store, T_DOUBLE, Volatile, false);
357
358 case vmIntrinsics::_putReferenceVolatile: return inline_unsafe_access( is_store, T_OBJECT, Volatile, false);
359 case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
360 case vmIntrinsics::_putByteVolatile: return inline_unsafe_access( is_store, T_BYTE, Volatile, false);
361 case vmIntrinsics::_putShortVolatile: return inline_unsafe_access( is_store, T_SHORT, Volatile, false);
362 case vmIntrinsics::_putCharVolatile: return inline_unsafe_access( is_store, T_CHAR, Volatile, false);
363 case vmIntrinsics::_putIntVolatile: return inline_unsafe_access( is_store, T_INT, Volatile, false);
364 case vmIntrinsics::_putLongVolatile: return inline_unsafe_access( is_store, T_LONG, Volatile, false);
365 case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access( is_store, T_FLOAT, Volatile, false);
366 case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access( is_store, T_DOUBLE, Volatile, false);
494 case vmIntrinsics::_notifyJvmtiVThreadMount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
495 "notifyJvmtiMount", false, false);
496 case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
497 "notifyJvmtiUnmount", false, false);
498 case vmIntrinsics::_notifyJvmtiVThreadHideFrames: return inline_native_notify_jvmti_hide();
499 case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
500 #endif
501
502 #ifdef JFR_HAVE_INTRINSICS
503 case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
504 case vmIntrinsics::_getEventWriter: return inline_native_getEventWriter();
505 case vmIntrinsics::_jvm_commit: return inline_native_jvm_commit();
506 #endif
507 case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
508 case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
509 case vmIntrinsics::_writeback0: return inline_unsafe_writeback0();
510 case vmIntrinsics::_writebackPreSync0: return inline_unsafe_writebackSync0(true);
511 case vmIntrinsics::_writebackPostSync0: return inline_unsafe_writebackSync0(false);
512 case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
513 case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
514 case vmIntrinsics::_setMemory: return inline_unsafe_setMemory();
515 case vmIntrinsics::_getLength: return inline_native_getLength();
516 case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
517 case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
518 case vmIntrinsics::_equalsB: return inline_array_equals(StrIntrinsicNode::LL);
519 case vmIntrinsics::_equalsC: return inline_array_equals(StrIntrinsicNode::UU);
520 case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
521 case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
522 case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());
523
524 case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
525 case vmIntrinsics::_newArray: return inline_unsafe_newArray(false);
526
527 case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();
528
529 case vmIntrinsics::_isInstance:
530 case vmIntrinsics::_getModifiers:
531 case vmIntrinsics::_isInterface:
532 case vmIntrinsics::_isArray:
533 case vmIntrinsics::_isPrimitive:
534 case vmIntrinsics::_isHidden:
535 case vmIntrinsics::_getSuperclass:
536 case vmIntrinsics::_getClassAccessFlags: return inline_native_Class_query(intrinsic_id());
537
538 case vmIntrinsics::_floatToRawIntBits:
539 case vmIntrinsics::_floatToIntBits:
540 case vmIntrinsics::_intBitsToFloat:
541 case vmIntrinsics::_doubleToRawLongBits:
542 case vmIntrinsics::_doubleToLongBits:
543 case vmIntrinsics::_longBitsToDouble:
544 case vmIntrinsics::_floatToFloat16:
545 case vmIntrinsics::_float16ToFloat: return inline_fp_conversions(intrinsic_id());
2237 case vmIntrinsics::_remainderUnsigned_l: {
2238 zero_check_long(argument(2));
2239       // Compile-time detection of division by zero
2240 if (stopped()) {
2241 return true; // keep the graph constructed so far
2242 }
2243 n = new UModLNode(control(), argument(0), argument(2));
2244 break;
2245 }
2246 default: fatal_unexpected_iid(id); break;
2247 }
2248 set_result(_gvn.transform(n));
2249 return true;
2250 }
2251
2252 //----------------------------inline_unsafe_access----------------------------
2253
2254 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2255 // Attempt to infer a sharper value type from the offset and base type.
2256 ciKlass* sharpened_klass = nullptr;
2257
2258 // See if it is an instance field, with an object type.
2259 if (alias_type->field() != nullptr) {
2260 if (alias_type->field()->type()->is_klass()) {
2261 sharpened_klass = alias_type->field()->type()->as_klass();
2262 }
2263 }
2264
2265 const TypeOopPtr* result = nullptr;
2266 // See if it is a narrow oop array.
2267 if (adr_type->isa_aryptr()) {
2268 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2269 const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2270 if (elem_type != nullptr && elem_type->is_loaded()) {
2271 // Sharpen the value type.
2272 result = elem_type;
2273 }
2274 }
2275 }
2276
2277   // The sharpened class might be unloaded if there is no class loader
2278   // constraint in place.
2279 if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2280 // Sharpen the value type.
2281 result = TypeOopPtr::make_from_klass(sharpened_klass);
2282 }
2283 if (result != nullptr) {
2284 #ifndef PRODUCT
2285 if (C->print_intrinsics() || C->print_inlining()) {
2286 tty->print(" from base type: "); adr_type->dump(); tty->cr();
2287 tty->print(" sharpened value: "); result->dump(); tty->cr();
2288 }
2289 #endif
2290 }
2291 return result;
2292 }
2293
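// Maps an Unsafe access flavor onto the corresponding C2 memory-ordering decorator.
// For example (see the intrinsic dispatch above), a plain Unsafe.getInt() reaches
// inline_unsafe_access() as Relaxed and is emitted with MO_UNORDERED, while
// Unsafe.getIntVolatile() arrives as Volatile and is emitted with MO_SEQ_CST.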
2294 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2295 switch (kind) {
2296 case Relaxed:
2297 return MO_UNORDERED;
2298 case Opaque:
2299 return MO_RELAXED;
2300 case Acquire:
2301 return MO_ACQUIRE;
2302 case Release:
2303 return MO_RELEASE;
2304 case Volatile:
2305 return MO_SEQ_CST;
2306 default:
2307 ShouldNotReachHere();
2308 return 0;
2309 }
2310 }
2311
2312 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2313 if (callee()->is_static()) return false; // caller must have the capability!
2314 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2315 guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2316 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2317 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2318
2319 if (is_reference_type(type)) {
2320 decorators |= ON_UNKNOWN_OOP_REF;
2321 }
2322
2323 if (unaligned) {
2324 decorators |= C2_UNALIGNED;
2325 }
2326
2327 #ifndef PRODUCT
2328 {
2329 ResourceMark rm;
2330 // Check the signatures.
2331 ciSignature* sig = callee()->signature();
2332 #ifdef ASSERT
2333 if (!is_store) {
2334 // Object getReference(Object base, int/long offset), etc.
2335 BasicType rtype = sig->return_type()->basic_type();
2336 assert(rtype == type, "getter must return the expected value");
2337 assert(sig->count() == 2, "oop getter has 2 arguments");
2338 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2339 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2340 } else {
2341 // void putReference(Object base, int/long offset, Object x), etc.
2342 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2343 assert(sig->count() == 3, "oop putter has 3 arguments");
2344 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2345 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2346 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2347 assert(vtype == type, "putter must accept the expected value");
2348 }
2349 #endif // ASSERT
2350 }
2351 #endif //PRODUCT
2352
2353 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2354
2355 Node* receiver = argument(0); // type: oop
2356
2357 // Build address expression.
2358 Node* heap_base_oop = top();
2359
2360 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2361 Node* base = argument(1); // type: oop
2362 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2363 Node* offset = argument(2); // type: long
2364 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2365 // to be plain byte offsets, which are also the same as those accepted
2366 // by oopDesc::field_addr.
2367 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2368 "fieldOffset must be byte-scaled");
2369 // 32-bit machines ignore the high half!
2370 offset = ConvL2X(offset);
2371
2372 // Save state and restore on bailout
2373 uint old_sp = sp();
2374 SafePointNode* old_map = clone_map();
2375
2376 Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2377 assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2378
2379 if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2380 if (type != T_OBJECT) {
2381 decorators |= IN_NATIVE; // off-heap primitive access
2382 } else {
2383 set_map(old_map);
2384 set_sp(old_sp);
2385 return false; // off-heap oop accesses are not supported
2386 }
2387 } else {
2388 heap_base_oop = base; // on-heap or mixed access
2389 }
2390
2391 // Can base be null? Otherwise, always on-heap access.
2392 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2393
2394 if (!can_access_non_heap) {
2395 decorators |= IN_HEAP;
2396 }
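  // (For example, an off-heap access such as Unsafe.getLong(address), which delegates
  //  to getLong(null, address) at the Java level, leaves base possibly null here, so
  //  IN_HEAP is deliberately not added for it.)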
2397
2398 Node* val = is_store ? argument(4) : nullptr;
2399
2400 const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2401 if (adr_type == TypePtr::NULL_PTR) {
2402 set_map(old_map);
2403 set_sp(old_sp);
2404 return false; // off-heap access with zero address
2405 }
2406
2407 // Try to categorize the address.
2408 Compile::AliasType* alias_type = C->alias_type(adr_type);
2409 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2410
2411 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2412 alias_type->adr_type() == TypeAryPtr::RANGE) {
2413 set_map(old_map);
2414 set_sp(old_sp);
2415 return false; // not supported
2416 }
2417
2418 bool mismatched = false;
2419 BasicType bt = alias_type->basic_type();
2420 if (bt != T_ILLEGAL) {
2421 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2422 if (bt == T_BYTE && adr_type->isa_aryptr()) {
2423       // Alias type doesn't differentiate between byte[] and boolean[].
2424 // Use address type to get the element type.
2425 bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2426 }
2427 if (is_reference_type(bt, true)) {
2428 // accessing an array field with getReference is not a mismatch
2429 bt = T_OBJECT;
2430 }
2431 if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2432 // Don't intrinsify mismatched object accesses
2433 set_map(old_map);
2434 set_sp(old_sp);
2435 return false;
2436 }
2437 mismatched = (bt != type);
2438 } else if (alias_type->adr_type()->isa_oopptr()) {
2439 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2440 }
2441
2442 destruct_map_clone(old_map);
2443 assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2444
2445 if (mismatched) {
2446 decorators |= C2_MISMATCHED;
2447 }
2448
2449 // First guess at the value type.
2450 const Type *value_type = Type::get_const_basic_type(type);
2451
2452 // Figure out the memory ordering.
2453 decorators |= mo_decorator_for_access_kind(kind);
2454
2455 if (!is_store && type == T_OBJECT) {
2456 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2457 if (tjp != nullptr) {
2458 value_type = tjp;
2459 }
2460 }
2461
2462 receiver = null_check(receiver);
2463 if (stopped()) {
2464 return true;
2465 }
2466 // Heap pointers get a null-check from the interpreter,
2467 // as a courtesy. However, this is not guaranteed by Unsafe,
2468 // and it is not possible to fully distinguish unintended nulls
2469 // from intended ones in this API.
2470
2471 if (!is_store) {
2472 Node* p = nullptr;
2473 // Try to constant fold a load from a constant field
2474 ciField* field = alias_type->field();
2475 if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) {
2476 // final or stable field
2477 p = make_constant_from_field(field, heap_base_oop);
2478 }
2479
2480 if (p == nullptr) { // Could not constant fold the load
2481 p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2482 // Normalize the value returned by getBoolean in the following cases
2483 if (type == T_BOOLEAN &&
2484 (mismatched ||
2485 heap_base_oop == top() || // - heap_base_oop is null or
2486 (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2487           // and the unsafe access is made at a large offset
2488 // (i.e., larger than the maximum offset necessary for any
2489 // field access)
2490 ) {
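        // The IdealKit sequence below is roughly equivalent to: p = (p != 0) ? 1 : 0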
2491 IdealKit ideal = IdealKit(this);
2492 #define __ ideal.
2493 IdealVariable normalized_result(ideal);
2494 __ declarations_done();
2495 __ set(normalized_result, p);
2496 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2497 __ set(normalized_result, ideal.ConI(1));
2498 ideal.end_if();
2499 final_sync(ideal);
2500 p = __ value(normalized_result);
2501 #undef __
2502 }
2503 }
2504 if (type == T_ADDRESS) {
2505 p = gvn().transform(new CastP2XNode(nullptr, p));
2506 p = ConvX2UL(p);
2507 }
2508 // The load node has the control of the preceding MemBarCPUOrder. All
2509 // following nodes will have the control of the MemBarCPUOrder inserted at
2510 // the end of this method. So, pushing the load onto the stack at a later
2511 // point is fine.
2512 set_result(p);
2513 } else {
2514 if (bt == T_ADDRESS) {
2515 // Repackage the long as a pointer.
2516 val = ConvL2X(val);
2517 val = gvn().transform(new CastX2PNode(val));
2518 }
2519 access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2520 }
2521
2522 return true;
2523 }
2524
2525 //----------------------------inline_unsafe_load_store----------------------------
2526 // This method serves a couple of different customers (depending on LoadStoreKind):
2527 //
2528 // LS_cmp_swap:
2529 //
2530 // boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2531 // boolean compareAndSetInt( Object o, long offset, int expected, int x);
2532 // boolean compareAndSetLong( Object o, long offset, long expected, long x);
2533 //
2534 // LS_cmp_swap_weak:
2535 //
2536 // boolean weakCompareAndSetReference( Object o, long offset, Object expected, Object x);
2537 // boolean weakCompareAndSetReferencePlain( Object o, long offset, Object expected, Object x);
2538 // boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2539 // boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2540 //
2541 // boolean weakCompareAndSetInt( Object o, long offset, int expected, int x);
2542 // boolean weakCompareAndSetIntPlain( Object o, long offset, int expected, int x);
2543 // boolean weakCompareAndSetIntAcquire( Object o, long offset, int expected, int x);
2544 // boolean weakCompareAndSetIntRelease( Object o, long offset, int expected, int x);
2710 }
2711 case LS_cmp_swap:
2712 case LS_cmp_swap_weak:
2713 case LS_get_add:
2714 break;
2715 default:
2716 ShouldNotReachHere();
2717 }
2718
2719 // Null check receiver.
2720 receiver = null_check(receiver);
2721 if (stopped()) {
2722 return true;
2723 }
2724
2725 int alias_idx = C->get_alias_index(adr_type);
2726
2727 if (is_reference_type(type)) {
2728 decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2729
2730     // Transformation of a value which could be a null pointer (CastPP #null)
2731     // could be delayed during Parse (for example, in adjust_map_after_if()).
2732     // Execute the transformation here to avoid barrier generation in such a case.
2733 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2734 newval = _gvn.makecon(TypePtr::NULL_PTR);
2735
2736 if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2737       // Refine the value to a null constant when it is known to be null
2738 oldval = _gvn.makecon(TypePtr::NULL_PTR);
2739 }
2740 }
2741
2742 Node* result = nullptr;
2743 switch (kind) {
2744 case LS_cmp_exchange: {
2745 result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2746 oldval, newval, value_type, type, decorators);
2747 break;
2748 }
2749 case LS_cmp_swap_weak:
2896 Deoptimization::Action_make_not_entrant);
2897 }
2898 if (stopped()) {
2899 return true;
2900 }
2901 #endif //INCLUDE_JVMTI
2902
2903 Node* test = nullptr;
2904 if (LibraryCallKit::klass_needs_init_guard(kls)) {
2905 // Note: The argument might still be an illegal value like
2906 // Serializable.class or Object[].class. The runtime will handle it.
2907 // But we must make an explicit check for initialization.
2908 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2909 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2910     // can generate code to load it as an unsigned byte.
2911 Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
2912 Node* bits = intcon(InstanceKlass::fully_initialized);
2913 test = _gvn.transform(new SubINode(inst, bits));
2914 // The 'test' is non-zero if we need to take a slow path.
2915 }
2916
2917 Node* obj = new_instance(kls, test);
2918 set_result(obj);
2919 return true;
2920 }
2921
2922 //------------------------inline_native_time_funcs--------------
2923 // Inline code for System.currentTimeMillis() and System.nanoTime();
2924 // these have the same type and signature.
2925 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2926 const TypeFunc* tf = OptoRuntime::void_long_Type();
2927 const TypePtr* no_memory_effects = nullptr;
2928 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2929 Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2930 #ifdef ASSERT
2931 Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2932 assert(value_top == top(), "second value must be top");
2933 #endif
2934 set_result(value);
2935 return true;
2936 }
2937
3693
3694 //------------------------inline_native_setCurrentThread------------------
3695 bool LibraryCallKit::inline_native_setCurrentThread() {
3696 assert(C->method()->changes_current_thread(),
3697 "method changes current Thread but is not annotated ChangesCurrentThread");
3698 Node* arr = argument(1);
3699 Node* thread = _gvn.transform(new ThreadLocalNode());
3700 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3701 Node* thread_obj_handle
3702 = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3703 thread_obj_handle = _gvn.transform(thread_obj_handle);
3704 const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3705 access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3706 JFR_ONLY(extend_setCurrentThread(thread, arr);)
3707 return true;
3708 }
3709
3710 const Type* LibraryCallKit::scopedValueCache_type() {
3711 ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3712 const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3713 const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3714
3715 // Because we create the scopedValue cache lazily we have to make the
3716 // type of the result BotPTR.
3717 bool xk = etype->klass_is_exact();
3718 const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3719 return objects_type;
3720 }
3721
3722 Node* LibraryCallKit::scopedValueCache_helper() {
3723 Node* thread = _gvn.transform(new ThreadLocalNode());
3724 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3725 // We cannot use immutable_memory() because we might flip onto a
3726 // different carrier thread, at which point we'll need to use that
3727 // carrier thread's cache.
3728 // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3729 // TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3730 return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3731 }
3732
3733 //------------------------inline_native_scopedValueCache------------------
3734 bool LibraryCallKit::inline_native_scopedValueCache() {
3735 Node* cache_obj_handle = scopedValueCache_helper();
3736 const Type* objects_type = scopedValueCache_type();
3737 set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3738
3821 }
3822
3823 // Result of top level CFG and Memory.
3824 RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
3825 record_for_igvn(result_rgn);
3826 PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
3827 record_for_igvn(result_mem);
3828
3829 result_rgn->init_req(_true_path, _gvn.transform(valid_pin_count));
3830 result_rgn->init_req(_false_path, _gvn.transform(continuation_is_null));
3831 result_mem->init_req(_true_path, _gvn.transform(updated_pin_count_memory));
3832 result_mem->init_req(_false_path, _gvn.transform(input_memory_state));
3833
3834 // Set output state.
3835 set_control(_gvn.transform(result_rgn));
3836 set_all_memory(_gvn.transform(result_mem));
3837
3838 return true;
3839 }
3840
3841 //---------------------------load_mirror_from_klass----------------------------
3842 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3843 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3844 Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3845 Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3846 // mirror = ((OopHandle)mirror)->resolve();
3847 return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3848 }
3849
3850 //-----------------------load_klass_from_mirror_common-------------------------
3851 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3852 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3853 // and branch to the given path on the region.
3854 // If never_see_null, take an uncommon trap on null, so we can optimistically
3855 // compile for the non-null case.
3856 // If the region is null, force never_see_null = true.
3857 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3858 bool never_see_null,
3859 RegionNode* region,
3860 int null_path,
3861 int offset) {
3862 if (region == nullptr) never_see_null = true;
3863 Node* p = basic_plus_adr(mirror, offset);
3864 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3865 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3866 Node* null_ctl = top();
3867 kls = null_check_oop(kls, &null_ctl, never_see_null);
3868 if (region != nullptr) {
3869     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
3873 }
3874 return kls;
3875 }
3876
3877 //--------------------(inline_native_Class_query helpers)---------------------
3878 // Use this for JVM_ACC_INTERFACE.
3879 // Fall through if (mods & mask) == bits, take the guard otherwise.
3880 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
3881 ByteSize offset, const Type* type, BasicType bt) {
3882 // Branch around if the given klass has the given modifier bit set.
3883 // Like generate_guard, adds a new path onto the region.
3884 Node* modp = basic_plus_adr(kls, in_bytes(offset));
3885 Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
3886 Node* mask = intcon(modifier_mask);
3887 Node* bits = intcon(modifier_bits);
3888 Node* mbit = _gvn.transform(new AndINode(mods, mask));
3889 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
3890 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3891 return generate_fair_guard(bol, region);
3892 }
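// For example, generate_interface_guard() below passes mask == JVM_ACC_INTERFACE and
// bits == 0: non-interface klasses fall through, and the guard path is taken when the
// interface bit is set.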
3893 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3894 return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
3895 Klass::access_flags_offset(), TypeInt::INT, T_INT);
3896 }
3897
3898 // Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
3899 Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3900 return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
3901 Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
3902 }
3903
3904 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3905 return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
3906 }
3907
3908 //-------------------------inline_native_Class_query-------------------
3909 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3910 const Type* return_type = TypeInt::BOOL;
3911 Node* prim_return_value = top(); // what happens if it's a primitive class?
3912 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4074
4075 case vmIntrinsics::_getClassAccessFlags:
4076 p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
4077 query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
4078 break;
4079
4080 default:
4081 fatal_unexpected_iid(id);
4082 break;
4083 }
4084
4085 // Fall-through is the normal case of a query to a real class.
4086 phi->init_req(1, query_value);
4087 region->init_req(1, control());
4088
4089 C->set_has_split_ifs(true); // Has chance for split-if optimization
4090 set_result(region, phi);
4091 return true;
4092 }
4093
4094 //-------------------------inline_Class_cast-------------------
4095 bool LibraryCallKit::inline_Class_cast() {
4096 Node* mirror = argument(0); // Class
4097 Node* obj = argument(1);
4098 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4099 if (mirror_con == nullptr) {
4100 return false; // dead path (mirror->is_top()).
4101 }
4102 if (obj == nullptr || obj->is_top()) {
4103 return false; // dead path
4104 }
4105 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4106
4107 // First, see if Class.cast() can be folded statically.
4108 // java_mirror_type() returns non-null for compile-time Class constants.
4109 ciType* tm = mirror_con->java_mirror_type();
4110 if (tm != nullptr && tm->is_klass() &&
4111 tp != nullptr) {
4112 if (!tp->is_loaded()) {
4113 // Don't use intrinsic when class is not loaded.
4114 return false;
4115 } else {
4116 int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());
4117 if (static_res == Compile::SSC_always_true) {
4118 // isInstance() is true - fold the code.
4119 set_result(obj);
4120 return true;
4121 } else if (static_res == Compile::SSC_always_false) {
4122 // Don't use intrinsic, have to throw ClassCastException.
4123 // If the reference is null, the non-intrinsic bytecode will
4124 // be optimized appropriately.
4125 return false;
4126 }
4127 }
4128 }
4129
4130   // Bail out of the intrinsic and do normal inlining if the exception path is frequent.
4131 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4132 return false;
4133 }
4134
4135 // Generate dynamic checks.
4136   // Class.cast() is the Java implementation of the _checkcast bytecode.
4137 // Do checkcast (Parse::do_checkcast()) optimizations here.
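  // At the Java level, Class.cast() behaves roughly like:
  //   if (obj != null && !isInstance(obj)) throw new ClassCastException(...);
  //   return obj;
  // so the intrinsic only needs to null-check the mirror and emit a checkcast-style
  // subtype check against the mirror's klass.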
4138
4139 mirror = null_check(mirror);
4140 // If mirror is dead, only null-path is taken.
4141 if (stopped()) {
4142 return true;
4143 }
4144
4145 // Not-subtype or the mirror's klass ptr is null (in case it is a primitive).
4146 enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
4147 RegionNode* region = new RegionNode(PATH_LIMIT);
4148 record_for_igvn(region);
4149
4150 // Now load the mirror's klass metaobject, and null-check it.
4151 // If kls is null, we have a primitive mirror and
4152 // nothing is an instance of a primitive type.
4153 Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4154
4155 Node* res = top();
4156 if (!stopped()) {
4157 Node* bad_type_ctrl = top();
4158 // Do checkcast optimizations.
4159 res = gen_checkcast(obj, kls, &bad_type_ctrl);
4160 region->init_req(_bad_type_path, bad_type_ctrl);
4161 }
4162 if (region->in(_prim_path) != top() ||
4163 region->in(_bad_type_path) != top()) {
4164 // Let Interpreter throw ClassCastException.
4165 PreserveJVMState pjvms(this);
4166 set_control(_gvn.transform(region));
4167 uncommon_trap(Deoptimization::Reason_intrinsic,
4168 Deoptimization::Action_maybe_recompile);
4169 }
4170 if (!stopped()) {
4171 set_result(res);
4172 }
4173 return true;
4174 }
4175
4176
4177 //--------------------------inline_native_subtype_check------------------------
4178 // This intrinsic takes the JNI calls out of the heart of
4179 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4180 bool LibraryCallKit::inline_native_subtype_check() {
4181 // Pull both arguments off the stack.
4182 Node* args[2]; // two java.lang.Class mirrors: superc, subc
4183 args[0] = argument(0);
4184 args[1] = argument(1);
4185 Node* klasses[2]; // corresponding Klasses: superk, subk
4186 klasses[0] = klasses[1] = top();
4187
4188 enum {
4189 // A full decision tree on {superc is prim, subc is prim}:
4190 _prim_0_path = 1, // {P,N} => false
4191 // {P,P} & superc!=subc => false
4192 _prim_same_path, // {P,P} & superc==subc => true
4193 _prim_1_path, // {N,P} => false
4194 _ref_subtype_path, // {N,N} & subtype check wins => true
4195 _both_ref_path, // {N,N} & subtype check loses => false
4196 PATH_LIMIT
4197 };
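  // For example, Number.class.isAssignableFrom(Integer.class) returns true via
  // _ref_subtype_path, int.class.isAssignableFrom(int.class) returns true via
  // _prim_same_path, and int.class.isAssignableFrom(long.class) returns false via
  // _prim_0_path (both primitive, but not the same mirror).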
4198
4199 RegionNode* region = new RegionNode(PATH_LIMIT);
4200 Node* phi = new PhiNode(region, TypeInt::BOOL);
4201 record_for_igvn(region);
4202
4203 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
4204 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4205 int class_klass_offset = java_lang_Class::klass_offset();
4206
4207 // First null-check both mirrors and load each mirror's klass metaobject.
4208 int which_arg;
4209 for (which_arg = 0; which_arg <= 1; which_arg++) {
4210 Node* arg = args[which_arg];
4211 arg = null_check(arg);
4212 if (stopped()) break;
4213 args[which_arg] = arg;
4214
4215 Node* p = basic_plus_adr(arg, class_klass_offset);
4216 Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
4217 klasses[which_arg] = _gvn.transform(kls);
4218 }
4219
4220 // Having loaded both klasses, test each for null.
4221 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4222 for (which_arg = 0; which_arg <= 1; which_arg++) {
4223 Node* kls = klasses[which_arg];
4224 Node* null_ctl = top();
4225 kls = null_check_oop(kls, &null_ctl, never_see_null);
4226 int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
4227 region->init_req(prim_path, null_ctl);
4228 if (stopped()) break;
4229 klasses[which_arg] = kls;
4230 }
4231
4232 if (!stopped()) {
4233 // now we have two reference types, in klasses[0..1]
4234 Node* subk = klasses[1]; // the argument to isAssignableFrom
4235 Node* superk = klasses[0]; // the receiver
4236 region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4237 // now we have a successful reference subtype check
4238 region->set_req(_ref_subtype_path, control());
4239 }
4240
4241 // If both operands are primitive (both klasses null), then
4242 // we must return true when they are identical primitives.
4243 // It is convenient to test this after the first null klass check.
4244 set_control(region->in(_prim_0_path)); // go back to first null check
4245 if (!stopped()) {
4246 // Since superc is primitive, make a guard for the superc==subc case.
4247 Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4248 Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4249 generate_guard(bol_eq, region, PROB_FAIR);
4250 if (region->req() == PATH_LIMIT+1) {
4251 // A guard was added. If the added guard is taken, superc==subc.
4252 region->swap_edges(PATH_LIMIT, _prim_same_path);
4253 region->del_req(PATH_LIMIT);
4254 }
4255 region->set_req(_prim_0_path, control()); // Not equal after all.
4256 }
4257
4258 // these are the only paths that produce 'true':
4259 phi->set_req(_prim_same_path, intcon(1));
4260 phi->set_req(_ref_subtype_path, intcon(1));
4261
4262 // pull together the cases:
4263 assert(region->req() == PATH_LIMIT, "sane region");
4264 for (uint i = 1; i < region->req(); i++) {
4265 Node* ctl = region->in(i);
4266 if (ctl == nullptr || ctl == top()) {
4267 region->set_req(i, top());
4268 phi ->set_req(i, top());
4269 } else if (phi->in(i) == nullptr) {
4270 phi->set_req(i, intcon(0)); // all other paths produce 'false'
4271 }
4272 }
4273
4274 set_control(_gvn.transform(region));
4275 set_result(_gvn.transform(phi));
4276 return true;
4277 }
4278
4279 //---------------------generate_array_guard_common------------------------
4280 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
4281 bool obj_array, bool not_array) {
4282
4283 if (stopped()) {
4284 return nullptr;
4285 }
4286
4287   // If obj_array/not_array==false/false:
4288   // Branch around if the given klass is in fact an array (either obj or prim).
4289   // If obj_array/not_array==false/true:
4290   // Branch around if the given klass is not an array klass of any kind.
4291   // If obj_array/not_array==true/true:
4292   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
4293   // If obj_array/not_array==true/false:
4294   // Branch around if the kls is an oop array (Object[] or subtype)
4295 //
4296 // Like generate_guard, adds a new path onto the region.
4297 jint layout_con = 0;
4298 Node* layout_val = get_layout_helper(kls, layout_con);
4299 if (layout_val == nullptr) {
4300 bool query = (obj_array
4301 ? Klass::layout_helper_is_objArray(layout_con)
4302 : Klass::layout_helper_is_array(layout_con));
4303 if (query == not_array) {
4304 return nullptr; // never a branch
4305 } else { // always a branch
4306 Node* always_branch = control();
4307 if (region != nullptr)
4308 region->add_req(always_branch);
4309 set_control(top());
4310 return always_branch;
4311 }
4312 }
4313 // Now test the correct condition.
4314 jint nval = (obj_array
4315 ? (jint)(Klass::_lh_array_tag_type_value
4316 << Klass::_lh_array_tag_shift)
4317 : Klass::_lh_neutral_value);
4318 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4319 BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array
4320 // invert the test if we are looking for a non-array
4321 if (not_array) btest = BoolTest(btest).negate();
4322 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4323 return generate_fair_guard(bol, region);
4324 }
4325
4326
4327 //-----------------------inline_native_newArray--------------------------
4328 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4329 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4330 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4331 Node* mirror;
4332 Node* count_val;
4333 if (uninitialized) {
4334 null_check_receiver();
4335 mirror = argument(1);
4336 count_val = argument(2);
4337 } else {
4338 mirror = argument(0);
4339 count_val = argument(1);
4340 }
4341
4342 mirror = null_check(mirror);
4343 // If mirror or obj is dead, only null-path is taken.
4344 if (stopped()) return true;
4345
4346 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4347 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4348 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4454 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4455 { PreserveReexecuteState preexecs(this);
4456 jvms()->set_should_reexecute(true);
4457
4458 array_type_mirror = null_check(array_type_mirror);
4459 original = null_check(original);
4460
4461 // Check if a null path was taken unconditionally.
4462 if (stopped()) return true;
4463
4464 Node* orig_length = load_array_length(original);
4465
4466 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4467 klass_node = null_check(klass_node);
4468
4469 RegionNode* bailout = new RegionNode(1);
4470 record_for_igvn(bailout);
4471
4472 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4473 // Bail out if that is so.
4474 Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
4475 if (not_objArray != nullptr) {
4476 // Improve the klass node's type from the new optimistic assumption:
4477 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4478 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
4479 Node* cast = new CastPPNode(control(), klass_node, akls);
4480 klass_node = _gvn.transform(cast);
4481 }
4482
4483 // Bail out if either start or end is negative.
4484 generate_negative_guard(start, bailout, &start);
4485 generate_negative_guard(end, bailout, &end);
4486
4487 Node* length = end;
4488 if (_gvn.type(start) != TypeInt::ZERO) {
4489 length = _gvn.transform(new SubINode(end, start));
4490 }
4491
4492 // Bail out if length is negative (i.e., if start > end).
4493     // Without this, new_array would throw
4494     // NegativeArraySizeException, but IllegalArgumentException is what
4495     // should be thrown.
4496 generate_negative_guard(length, bailout, &length);
4497
4498 // Bail out if start is larger than the original length
4499 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4500 generate_negative_guard(orig_tail, bailout, &orig_tail);
4501
4502 if (bailout->req() > 1) {
4503 PreserveJVMState pjvms(this);
4504 set_control(_gvn.transform(bailout));
4505 uncommon_trap(Deoptimization::Reason_intrinsic,
4506 Deoptimization::Action_maybe_recompile);
4507 }
4508
4509 if (!stopped()) {
4510 // How many elements will we copy from the original?
4511 // The answer is MinI(orig_tail, length).
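      // For example, Arrays.copyOfRange(a, 2, 10) on an array of length 5 copies
      // min(5 - 2, 10 - 2) = 3 elements; the remaining 5 slots of the new array keep
      // their default (zero/null) values.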
4512 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4513
4514 // Generate a direct call to the right arraycopy function(s).
4515 // We know the copy is disjoint but we might not know if the
4516 // oop stores need checking.
4517 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
4523 // to the copyOf to be validated, including that the copy to the
4524 // new array won't trigger an ArrayStoreException. That subtype
4525       // check can be optimized if we know something about the type of
4526 // the input array from type speculation.
4527 if (_gvn.type(klass_node)->singleton()) {
4528 const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4529 const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4530
4531 int test = C->static_subtype_check(superk, subk);
4532 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4533 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4534 if (t_original->speculative_type() != nullptr) {
4535 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4536 }
4537 }
4538 }
4539
4540 bool validated = false;
4541 // Reason_class_check rather than Reason_intrinsic because we
4542 // want to intrinsify even if this traps.
4543 if (!too_many_traps(Deoptimization::Reason_class_check)) {
4544 Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4545
4546 if (not_subtype_ctrl != top()) {
4547 PreserveJVMState pjvms(this);
4548 set_control(not_subtype_ctrl);
4549 uncommon_trap(Deoptimization::Reason_class_check,
4550 Deoptimization::Action_make_not_entrant);
4551 assert(stopped(), "Should be stopped");
4552 }
4553 validated = true;
4554 }
4555
4556 if (!stopped()) {
4557 newcopy = new_array(klass_node, length, 0); // no arguments to push
4558
4559 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
4560 load_object_klass(original), klass_node);
4561 if (!is_copyOfRange) {
4562 ac->set_copyof(validated);
4563 } else {
4609
4610 //-----------------------generate_method_call----------------------------
4611 // Use generate_method_call to make a slow-call to the real
4612 // method if the fast path fails. An alternative would be to
4613 // use a stub like OptoRuntime::slow_arraycopy_Java.
4614 // This only works for expanding the current library call,
4615 // not another intrinsic. (E.g., don't use this for making an
4616 // arraycopy call inside of the copyOf intrinsic.)
4617 CallJavaNode*
4618 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4619 // When compiling the intrinsic method itself, do not use this technique.
4620 guarantee(callee() != C->method(), "cannot make slow-call to self");
4621
4622 ciMethod* method = callee();
4623 // ensure the JVMS we have will be correct for this call
4624 guarantee(method_id == method->intrinsic_id(), "must match");
4625
4626 const TypeFunc* tf = TypeFunc::make(method);
4627 if (res_not_null) {
4628 assert(tf->return_type() == T_OBJECT, "");
4629 const TypeTuple* range = tf->range();
4630 const Type** fields = TypeTuple::fields(range->cnt());
4631 fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4632 const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4633 tf = TypeFunc::make(tf->domain(), new_range);
4634 }
4635 CallJavaNode* slow_call;
4636 if (is_static) {
4637 assert(!is_virtual, "");
4638 slow_call = new CallStaticJavaNode(C, tf,
4639 SharedRuntime::get_resolve_static_call_stub(), method);
4640 } else if (is_virtual) {
4641 assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4642 int vtable_index = Method::invalid_vtable_index;
4643 if (UseInlineCaches) {
4644 // Suppress the vtable call
4645 } else {
4646       // hashCode and clone are not miranda methods,
4647 // so the vtable index is fixed.
4648 // No need to use the linkResolver to get it.
4649 vtable_index = method->vtable_index();
4650 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4651 "bad index %d", vtable_index);
4652 }
4653 slow_call = new CallDynamicJavaNode(tf,
4670 set_edges_for_java_call(slow_call);
4671 return slow_call;
4672 }
4673
4674
4675 /**
4676 * Build special case code for calls to hashCode on an object. This call may
4677 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4678 * slightly different code.
4679 */
4680 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4681 assert(is_static == callee()->is_static(), "correct intrinsic selection");
4682 assert(!(is_virtual && is_static), "either virtual, special, or static");
4683
4684 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4685
4686 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4687 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
4688 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4689 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4690 Node* obj = nullptr;
4691 if (!is_static) {
4692 // Check for hashing null object
4693 obj = null_check_receiver();
4694 if (stopped()) return true; // unconditionally null
4695 result_reg->init_req(_null_path, top());
4696 result_val->init_req(_null_path, top());
4697 } else {
4698 // Do a null check, and return zero if null.
4699 // System.identityHashCode(null) == 0
4700 obj = argument(0);
4701 Node* null_ctl = top();
4702 obj = null_check_oop(obj, &null_ctl);
4703 result_reg->init_req(_null_path, null_ctl);
4704 result_val->init_req(_null_path, _gvn.intcon(0));
4705 }
4706
4707 // Unconditionally null? Then return right away.
4708 if (stopped()) {
4709 set_control( result_reg->in(_null_path));
4710 if (!stopped())
4711 set_result(result_val->in(_null_path));
4712 return true;
4713 }
4714
4715 // We only go to the fast case code if we pass a number of guards. The
4716 // paths which do not pass are accumulated in the slow_region.
4717 RegionNode* slow_region = new RegionNode(1);
4718 record_for_igvn(slow_region);
4719
4720 // If this is a virtual call, we generate a funny guard. We pull out
4721 // the vtable entry corresponding to hashCode() from the target object.
4722 // If the target method which we are calling happens to be the native
4723 // Object hashCode() method, we pass the guard. We do not need this
4724   // guard for non-virtual calls -- the callee is known to be the native
4725 // Object hashCode().
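  // (Roughly, generate_virtual_guard() loads the receiver class's vtable entry for
  //  hashCode() and compares it against the expected Object.hashCode() method; an
  //  overriding method routes control to slow_region.)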
4726 if (is_virtual) {
4727 // After null check, get the object's klass.
4728 Node* obj_klass = load_object_klass(obj);
4729 generate_virtual_guard(obj_klass, slow_region);
4730 }
4731
4732 // Get the header out of the object, use LoadMarkNode when available
4733 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4734 // The control of the load must be null. Otherwise, the load can move before
4735 // the null check after castPP removal.
4736 Node* no_ctrl = nullptr;
4737 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4738
4739 if (!UseObjectMonitorTable) {
4740 // Test the header to see if it is safe to read w.r.t. locking.
4741 Node *lock_mask = _gvn.MakeConX(markWord::lock_mask_in_place);
4742 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4743 if (LockingMode == LM_LIGHTWEIGHT) {
4744 Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
4745 Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4746 Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4747
4748 generate_slow_guard(test_monitor, slow_region);
4749 } else {
4750 Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
4751 Node *chk_unlocked = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
4752 Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
4753
4754 generate_slow_guard(test_not_unlocked, slow_region);
4755 }
4756 }
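  // In either locking mode the guard keeps us from reading a mark word that no longer
  // holds the identity hash directly (for example, one that points at an ObjectMonitor).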
4757
4758 // Get the hash value and check to see that it has been properly assigned.
4759 // We depend on hash_mask being at most 32 bits and avoid the use of
4760 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4761 // vm: see markWord.hpp.
4796 // this->control() comes from set_results_for_java_call
4797 result_reg->init_req(_slow_path, control());
4798 result_val->init_req(_slow_path, slow_result);
4799 result_io ->set_req(_slow_path, i_o());
4800 result_mem ->set_req(_slow_path, reset_memory());
4801 }
4802
4803 // Return the combined state.
4804 set_i_o( _gvn.transform(result_io) );
4805 set_all_memory( _gvn.transform(result_mem));
4806
4807 set_result(result_reg, result_val);
4808 return true;
4809 }
4810
4811 //---------------------------inline_native_getClass----------------------------
4812 // public final native Class<?> java.lang.Object.getClass();
4813 //
4814 // Build special case code for calls to getClass on an object.
4815 bool LibraryCallKit::inline_native_getClass() {
4816 Node* obj = null_check_receiver();
4817 if (stopped()) return true;
4818 set_result(load_mirror_from_klass(load_object_klass(obj)));
4819 return true;
4820 }
4821
4822 //-----------------inline_native_Reflection_getCallerClass---------------------
4823 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4824 //
4825 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4826 //
4827 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4828 // in that it must skip particular security frames and checks for
4829 // caller sensitive methods.
4830 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4831 #ifndef PRODUCT
4832 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4833 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4834 }
4835 #endif
4836
5148 dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5149
5150 flags |= RC_NARROW_MEM; // narrow in memory
5151 }
5152
5153 // Call it. Note that the length argument is not scaled.
5154 make_runtime_call(flags,
5155 OptoRuntime::make_setmemory_Type(),
5156 StubRoutines::unsafe_setmemory(),
5157 "unsafe_setmemory",
5158 dst_type,
5159 dst_addr, size XTOP, byte);
5160
5161 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
5162
5163 return true;
5164 }
5165
5166 #undef XTOP
5167
5168 //------------------------copy_to_clone-----------------------------------
5169 // Helper function for inline_native_clone.
5170 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5171 assert(obj_size != nullptr, "");
5172 Node* raw_obj = alloc_obj->in(1);
5173 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5174
5175 AllocateNode* alloc = nullptr;
5176 if (ReduceBulkZeroing &&
5177 // If we are implementing an array clone without knowing its source type
5178 // (can happen when compiling the array-guarded branch of a reflective
5179 // Object.clone() invocation), initialize the array within the allocation.
5180 // This is needed because some GCs (e.g. ZGC) might fall back in this case
5181 // to a runtime clone call that assumes fully initialized source arrays.
5182 (!is_array || obj->get_ptr_type()->isa_aryptr() != nullptr)) {
5183 // We will be completely responsible for initializing this object -
5184 // mark Initialize node as complete.
5185 alloc = AllocateNode::Ideal_allocation(alloc_obj);
5186     // The object was just allocated - there should not be any stores!
5187 guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
5218 // not cloneable or finalizer => slow path to out-of-line Object.clone
5219 //
5220 // The general case has two steps, allocation and copying.
5221 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5222 //
5223 // Copying also has two cases, oop arrays and everything else.
5224 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5225 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5226 //
5227 // These steps fold up nicely if and when the cloned object's klass
5228 // can be sharply typed as an object array, a type array, or an instance.
5229 //
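// For illustration (sketch of how the paths below are selected):
//   int[]    -> _array_path:    plain array allocation plus a raw word copy of the payload
//   T[]      -> _objArray_path: array allocation plus arrayof_oop_arraycopy, taken only
//                               when the GC requires barriers for oop-array copies
//   Object o -> _instance_path: plain instance allocation plus a raw copy of the fields
//   not cloneable, or has a finalizer -> _slow_path (out-of-line Object.clone)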
5230 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5231 PhiNode* result_val;
5232
5233 // Set the reexecute bit for the interpreter to reexecute
5234 // the bytecode that invokes Object.clone if deoptimization happens.
5235 { PreserveReexecuteState preexecs(this);
5236 jvms()->set_should_reexecute(true);
5237
5238 Node* obj = null_check_receiver();
5239 if (stopped()) return true;
5240
5241 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5242
5243 // If we are going to clone an instance, we need its exact type to
5244 // know the number and types of fields to convert the clone to
5245 // loads/stores. Maybe a speculative type can help us.
5246 if (!obj_type->klass_is_exact() &&
5247 obj_type->speculative_type() != nullptr &&
5248 obj_type->speculative_type()->is_instance_klass()) {
5249 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5250 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5251 !spec_ik->has_injected_fields()) {
5252 if (!obj_type->isa_instptr() ||
5253 obj_type->is_instptr()->instance_klass()->has_subklass()) {
5254 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5255 }
5256 }
5257 }
5258
5259 // Conservatively insert a memory barrier on all memory slices.
5260 // Do not let writes into the original float below the clone.
5261 insert_mem_bar(Op_MemBarCPUOrder);
5262
5263 // paths into result_reg:
5264 enum {
5265 _slow_path = 1, // out-of-line call to clone method (virtual or not)
5266 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
5267 _array_path, // plain array allocation, plus arrayof_long_arraycopy
5268 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
5269 PATH_LIMIT
5270 };
5271 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5272 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5273 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
5274 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5275 record_for_igvn(result_reg);
5276
5277 Node* obj_klass = load_object_klass(obj);
5278 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
5279 if (array_ctl != nullptr) {
5280 // It's an array.
5281 PreserveJVMState pjvms(this);
5282 set_control(array_ctl);
5283 Node* obj_length = load_array_length(obj);
5284 Node* array_size = nullptr; // Size of the array without object alignment padding.
5285 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5286
5287 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5288 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5289 // If it is an oop array, it requires very special treatment,
5290 // because gc barriers are required when accessing the array.
5291 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5292 if (is_obja != nullptr) {
5293 PreserveJVMState pjvms2(this);
5294 set_control(is_obja);
5295 // Generate a direct call to the right arraycopy function(s).
5296 // Clones are always tightly coupled.
5297 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5298 ac->set_clone_oop_array();
5299 Node* n = _gvn.transform(ac);
5300 assert(n == ac, "cannot disappear");
5301 ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5302
5303 result_reg->init_req(_objArray_path, control());
5304 result_val->init_req(_objArray_path, alloc_obj);
5305 result_i_o ->set_req(_objArray_path, i_o());
5306 result_mem ->set_req(_objArray_path, reset_memory());
5307 }
5308 }
5309 // Otherwise, there are no barriers to worry about.
5310 // (We can dispense with card marks if we know the allocation
5311 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
5312 // causes the non-eden paths to take compensating steps to
5313 // simulate a fresh allocation, so that no further
5314 // card marks are required in compiled code to initialize
5315 // the object.)
5316
5317 if (!stopped()) {
5318 copy_to_clone(obj, alloc_obj, array_size, true);
5319
5320 // Present the results of the copy.
5321 result_reg->init_req(_array_path, control());
5322 result_val->init_req(_array_path, alloc_obj);
5323 result_i_o ->set_req(_array_path, i_o());
5324 result_mem ->set_req(_array_path, reset_memory());
5325 }
5326 }
5327
5328 // We only go to the instance fast case code if we pass a number of guards.
5329 // The paths which do not pass are accumulated in the slow_region.
5330 RegionNode* slow_region = new RegionNode(1);
5331 record_for_igvn(slow_region);
5332 if (!stopped()) {
5333 // It's an instance (we did array above). Make the slow-path tests.
5334 // If this is a virtual call, we generate a funny guard. We grab
5335 // the vtable entry corresponding to clone() from the target object.
5336 // If the target method which we are calling happens to be the
5337 // Object clone() method, we pass the guard. We do not need this
5338 // guard for non-virtual calls; the caller is known to be the native
5339 // Object clone().
5340 if (is_virtual) {
5341 generate_virtual_guard(obj_klass, slow_region);
5342 }
5343
5344 // The object must be easily cloneable and must not have a finalizer.
5345 // Both of these conditions may be checked in a single test.
5346 // We could optimize the test further, but we don't care.
5347 generate_misc_flags_guard(obj_klass,
5348 // Test both conditions:
5349 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
5350 // Must be cloneable but not finalizer:
5351 KlassFlags::_misc_is_cloneable_fast,
5443 set_jvms(sfpt->jvms());
5444 _reexecute_sp = jvms()->sp();
5445
5446 return saved_jvms;
5447 }
5448 }
5449 }
5450 return nullptr;
5451 }
5452
5453 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5454 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5455 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5456 JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5457 uint size = alloc->req();
5458 SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5459 old_jvms->set_map(sfpt);
5460 for (uint i = 0; i < size; i++) {
5461 sfpt->init_req(i, alloc->in(i));
5462 }
5463 // re-push array length for deoptimization
5464 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
5465 old_jvms->set_sp(old_jvms->sp()+1);
5466 old_jvms->set_monoff(old_jvms->monoff()+1);
5467 old_jvms->set_scloff(old_jvms->scloff()+1);
5468 old_jvms->set_endoff(old_jvms->endoff()+1);
5469 old_jvms->set_should_reexecute(true);
5470
5471 sfpt->set_i_o(map()->i_o());
5472 sfpt->set_memory(map()->memory());
5473 sfpt->set_control(map()->control());
5474 return sfpt;
5475 }
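// For illustration: the re-pushed length matters for a tightly coupled pattern
// such as (Java-level sketch):
//   int[] dst = new int[len];
//   System.arraycopy(src, 0, dst, 0, len);
// If a guard emitted for the arraycopy deoptimizes, the interpreter re-executes
// the `new int[len]` bytecode and finds `len` on the expression stack of the
// state captured by this safepoint.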
5476
5477 // In case of a deoptimization, we restart execution at the
5478 // allocation, allocating a new array. We would leave an uninitialized
5479 // array in the heap that GCs wouldn't expect. Move the allocation
5480 // after the traps so we don't allocate the array if we
5481 // deoptimize. This is possible because tightly_coupled_allocation()
5482 // guarantees there's no observer of the allocated array at this point
5483 // and the control flow is simple enough.
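// For illustration (ordering sketch, not actual node names):
//   before: allocate dst[len]; guard_1; ...; guard_n; arraycopy(src, dst)
//   after:  guard_1; ...; guard_n; allocate dst[len]; arraycopy(src, dst)
// A deoptimizing guard therefore never leaves behind a freshly allocated,
// uninitialized array.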
5484 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5485 int saved_reexecute_sp, uint new_idx) {
5486 if (saved_jvms_before_guards != nullptr && !stopped()) {
5487 replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5488
5489 assert(alloc != nullptr, "only with a tightly coupled allocation");
5490 // restore JVM state to the state at the arraycopy
5491 saved_jvms_before_guards->map()->set_control(map()->control());
5492 assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5493 assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5494 // If we've improved the types of some nodes (null check) while
5495 // emitting the guards, propagate them to the current state
5496 map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5497 set_jvms(saved_jvms_before_guards);
5498 _reexecute_sp = saved_reexecute_sp;
5499
5500 // Remove the allocation from above the guards
5501 CallProjections callprojs;
5502 alloc->extract_projections(&callprojs, true);
5503 InitializeNode* init = alloc->initialization();
5504 Node* alloc_mem = alloc->in(TypeFunc::Memory);
5505 C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5506 C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5507
5508 // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5509 // the allocation (i.e. is only valid if the allocation succeeds):
5510 // 1) replace CastIINode with AllocateArrayNode's length here
5511 // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5512 //
5513 // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
5514 // a new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy)
5515 Node* init_control = init->proj_out(TypeFunc::Control);
5516 Node* alloc_length = alloc->Ideal_length();
5517 #ifdef ASSERT
5518 Node* prev_cast = nullptr;
5519 #endif
5520 for (uint i = 0; i < init_control->outcnt(); i++) {
5521 Node* init_out = init_control->raw_out(i);
5522 if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5523 #ifdef ASSERT
5524 if (prev_cast == nullptr) {
5525 prev_cast = init_out;
5526 } else {
5527 if (prev_cast->cmp(*init_out) == false) {
5528 prev_cast->dump();
5529 init_out->dump();
5530 assert(false, "not equal CastIINode");
5531 }
5532 }
5533 #endif
5534 C->gvn_replace_by(init_out, alloc_length);
5535 }
5536 }
5537 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5538
5539 // move the allocation here (after the guards)
5540 _gvn.hash_delete(alloc);
5541 alloc->set_req(TypeFunc::Control, control());
5542 alloc->set_req(TypeFunc::I_O, i_o());
5543 Node *mem = reset_memory();
5544 set_all_memory(mem);
5545 alloc->set_req(TypeFunc::Memory, mem);
5546 set_control(init->proj_out_or_null(TypeFunc::Control));
5547 set_i_o(callprojs.fallthrough_ioproj);
5548
5549 // Update memory as done in GraphKit::set_output_for_allocation()
5550 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5551 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5552 if (ary_type->isa_aryptr() && length_type != nullptr) {
5553 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5554 }
5555 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5556 int elemidx = C->get_alias_index(telemref);
5557 set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5558 set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5559
5560 Node* allocx = _gvn.transform(alloc);
5561 assert(allocx == alloc, "where has the allocation gone?");
5562 assert(dest->is_CheckCastPP(), "not an allocation result?");
5563
5564 _gvn.hash_delete(dest);
5565 dest->set_req(0, control());
5566 Node* destx = _gvn.transform(dest);
5567 assert(destx == dest, "where has the allocation result gone?");
5865 top_src = src_type->isa_aryptr();
5866 has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5867 src_spec = true;
5868 }
5869 if (!has_dest) {
5870 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5871 dest_type = _gvn.type(dest);
5872 top_dest = dest_type->isa_aryptr();
5873 has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5874 dest_spec = true;
5875 }
5876 }
5877 }
5878
5879 if (has_src && has_dest && can_emit_guards) {
5880 BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5881 BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5882 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5883 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5884
5885 if (src_elem == dest_elem && src_elem == T_OBJECT) {
5886 // If both arrays are object arrays then having the exact types
5887 // for both will remove the need for a subtype check at runtime
5888 // before the call and may make it possible to pick a faster copy
5889 // routine (without a subtype check on every element)
5890 // Do we have the exact type of src?
5891 bool could_have_src = src_spec;
5892 // Do we have the exact type of dest?
5893 bool could_have_dest = dest_spec;
5894 ciKlass* src_k = nullptr;
5895 ciKlass* dest_k = nullptr;
5896 if (!src_spec) {
5897 src_k = src_type->speculative_type_not_null();
5898 if (src_k != nullptr && src_k->is_array_klass()) {
5899 could_have_src = true;
5900 }
5901 }
5902 if (!dest_spec) {
5903 dest_k = dest_type->speculative_type_not_null();
5904 if (dest_k != nullptr && dest_k->is_array_klass()) {
5905 could_have_dest = true;
5906 }
5907 }
5908 if (could_have_src && could_have_dest) {
5909 // If we can have both exact types, emit the missing guards
5910 if (could_have_src && !src_spec) {
5911 src = maybe_cast_profiled_obj(src, src_k, true);
5912 }
5913 if (could_have_dest && !dest_spec) {
5914 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5915 }
5916 }
5917 }
5918 }
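// For illustration: if profiling shows both operands are exactly String[] (a
// hypothetical example), the casts above let macro expansion pick a copy
// routine with no per-element subtype check; with only Object[] known for the
// destination, a checkcast-style copy with per-element checks would be needed.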
5919
5920 ciMethod* trap_method = method();
5921 int trap_bci = bci();
5922 if (saved_jvms_before_guards != nullptr) {
5923 trap_method = alloc->jvms()->method();
5924 trap_bci = alloc->jvms()->bci();
5925 }
5926
5927 bool negative_length_guard_generated = false;
5928
5929 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5930 can_emit_guards &&
5931 !src->is_top() && !dest->is_top()) {
5932 // validate arguments: enables transformation of the ArrayCopyNode
5933 validated = true;
5934
5935 RegionNode* slow_region = new RegionNode(1);
5936 record_for_igvn(slow_region);
5937
5938 // (1) src and dest are arrays.
5939 generate_non_array_guard(load_object_klass(src), slow_region);
5940 generate_non_array_guard(load_object_klass(dest), slow_region);
5941
5942 // (2) src and dest arrays must have elements of the same BasicType
5943 // done at macro expansion or at Ideal transformation time
5944
5945 // (4) src_offset must not be negative.
5946 generate_negative_guard(src_offset, slow_region);
5947
5948 // (5) dest_offset must not be negative.
5949 generate_negative_guard(dest_offset, slow_region);
5950
5951 // (7) src_offset + length must not exceed length of src.
5952 generate_limit_guard(src_offset, length,
5953 load_array_length(src),
5954 slow_region);
5955
5956 // (8) dest_offset + length must not exceed length of dest.
5957 generate_limit_guard(dest_offset, length,
5958 load_array_length(dest),
5959 slow_region);
5960
5961 // (6) length must not be negative.
5962 // This is also checked in generate_arraycopy() during macro expansion, but
5963 // we also have to check it here for the case where the ArrayCopyNode will
5964 // be eliminated by Escape Analysis.
5965 if (EliminateAllocations) {
5966 generate_negative_guard(length, slow_region);
5967 negative_length_guard_generated = true;
5968 }
5969
5970 // (9) each element of an oop array must be assignable
5971 Node* dest_klass = load_object_klass(dest);
5972 if (src != dest) {
5973 Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
5974
5975 if (not_subtype_ctrl != top()) {
5976 PreserveJVMState pjvms(this);
5977 set_control(not_subtype_ctrl);
5978 uncommon_trap(Deoptimization::Reason_intrinsic,
5979 Deoptimization::Action_make_not_entrant);
5980 assert(stopped(), "Should be stopped");
5981 }
5982 }
5983 {
5984 PreserveJVMState pjvms(this);
5985 set_control(_gvn.transform(slow_region));
5986 uncommon_trap(Deoptimization::Reason_intrinsic,
5987 Deoptimization::Action_make_not_entrant);
5988 assert(stopped(), "Should be stopped");
5989 }
5990
5991 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5992 const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
5993 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5994 arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
5995 }
5996
5997 if (stopped()) {
5998 return true;
5999 }
6000
6001 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6002 // Create LoadRange and LoadKlass nodes for use during macro expansion here
6003 // so the compiler has a chance to eliminate them: during macro expansion,
6004 // we have to set their control (CastPP nodes are eliminated).
6005 load_object_klass(src), load_object_klass(dest),
6006 load_array_length(src), load_array_length(dest));
6007
6008 ac->set_arraycopy(validated);
6009
6010 Node* n = _gvn.transform(ac);
6011 if (n == ac) {
6012 ac->connect_outputs(this);
6013 } else {
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "ci/ciFlatArrayKlass.hpp"
28 #include "ci/ciUtilities.inline.hpp"
29 #include "classfile/vmIntrinsics.hpp"
30 #include "compiler/compileBroker.hpp"
31 #include "compiler/compileLog.hpp"
32 #include "gc/shared/barrierSet.hpp"
33 #include "jfr/support/jfrIntrinsics.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "oops/klass.inline.hpp"
36 #include "oops/objArrayKlass.hpp"
37 #include "opto/addnode.hpp"
38 #include "opto/arraycopynode.hpp"
39 #include "opto/c2compiler.hpp"
40 #include "opto/castnode.hpp"
41 #include "opto/cfgnode.hpp"
42 #include "opto/convertnode.hpp"
43 #include "opto/countbitsnode.hpp"
44 #include "opto/idealKit.hpp"
45 #include "opto/library_call.hpp"
46 #include "opto/mathexactnode.hpp"
47 #include "opto/mulnode.hpp"
309 case vmIntrinsics::_indexOfIL: return inline_string_indexOfI(StrIntrinsicNode::LL);
310 case vmIntrinsics::_indexOfIU: return inline_string_indexOfI(StrIntrinsicNode::UU);
311 case vmIntrinsics::_indexOfIUL: return inline_string_indexOfI(StrIntrinsicNode::UL);
312 case vmIntrinsics::_indexOfU_char: return inline_string_indexOfChar(StrIntrinsicNode::U);
313 case vmIntrinsics::_indexOfL_char: return inline_string_indexOfChar(StrIntrinsicNode::L);
314
315 case vmIntrinsics::_equalsL: return inline_string_equals(StrIntrinsicNode::LL);
316
317 case vmIntrinsics::_vectorizedHashCode: return inline_vectorizedHashCode();
318
319 case vmIntrinsics::_toBytesStringU: return inline_string_toBytesU();
320 case vmIntrinsics::_getCharsStringU: return inline_string_getCharsU();
321 case vmIntrinsics::_getCharStringU: return inline_string_char_access(!is_store);
322 case vmIntrinsics::_putCharStringU: return inline_string_char_access( is_store);
323
324 case vmIntrinsics::_compressStringC:
325 case vmIntrinsics::_compressStringB: return inline_string_copy( is_compress);
326 case vmIntrinsics::_inflateStringC:
327 case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);
328
329 case vmIntrinsics::_makePrivateBuffer: return inline_unsafe_make_private_buffer();
330 case vmIntrinsics::_finishPrivateBuffer: return inline_unsafe_finish_private_buffer();
331 case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false);
332 case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
333 case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false);
334 case vmIntrinsics::_getShort: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, false);
335 case vmIntrinsics::_getChar: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, false);
336 case vmIntrinsics::_getInt: return inline_unsafe_access(!is_store, T_INT, Relaxed, false);
337 case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false);
338 case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false);
339 case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);
340 case vmIntrinsics::_getValue: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false, true);
341
342 case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false);
343 case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
344 case vmIntrinsics::_putByte: return inline_unsafe_access( is_store, T_BYTE, Relaxed, false);
345 case vmIntrinsics::_putShort: return inline_unsafe_access( is_store, T_SHORT, Relaxed, false);
346 case vmIntrinsics::_putChar: return inline_unsafe_access( is_store, T_CHAR, Relaxed, false);
347 case vmIntrinsics::_putInt: return inline_unsafe_access( is_store, T_INT, Relaxed, false);
348 case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false);
349 case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false);
350 case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false);
351 case vmIntrinsics::_putValue: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false, true);
352
353 case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
354 case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
355 case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
356 case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
357 case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_store, T_CHAR, Volatile, false);
358 case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_store, T_INT, Volatile, false);
359 case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_store, T_LONG, Volatile, false);
360 case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_store, T_FLOAT, Volatile, false);
361 case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_store, T_DOUBLE, Volatile, false);
362
363 case vmIntrinsics::_putReferenceVolatile: return inline_unsafe_access( is_store, T_OBJECT, Volatile, false);
364 case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
365 case vmIntrinsics::_putByteVolatile: return inline_unsafe_access( is_store, T_BYTE, Volatile, false);
366 case vmIntrinsics::_putShortVolatile: return inline_unsafe_access( is_store, T_SHORT, Volatile, false);
367 case vmIntrinsics::_putCharVolatile: return inline_unsafe_access( is_store, T_CHAR, Volatile, false);
368 case vmIntrinsics::_putIntVolatile: return inline_unsafe_access( is_store, T_INT, Volatile, false);
369 case vmIntrinsics::_putLongVolatile: return inline_unsafe_access( is_store, T_LONG, Volatile, false);
370 case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access( is_store, T_FLOAT, Volatile, false);
371 case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access( is_store, T_DOUBLE, Volatile, false);
499 case vmIntrinsics::_notifyJvmtiVThreadMount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
500 "notifyJvmtiMount", false, false);
501 case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
502 "notifyJvmtiUnmount", false, false);
503 case vmIntrinsics::_notifyJvmtiVThreadHideFrames: return inline_native_notify_jvmti_hide();
504 case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
505 #endif
506
507 #ifdef JFR_HAVE_INTRINSICS
508 case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
509 case vmIntrinsics::_getEventWriter: return inline_native_getEventWriter();
510 case vmIntrinsics::_jvm_commit: return inline_native_jvm_commit();
511 #endif
512 case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
513 case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
514 case vmIntrinsics::_writeback0: return inline_unsafe_writeback0();
515 case vmIntrinsics::_writebackPreSync0: return inline_unsafe_writebackSync0(true);
516 case vmIntrinsics::_writebackPostSync0: return inline_unsafe_writebackSync0(false);
517 case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
518 case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
519 case vmIntrinsics::_isFlatArray: return inline_unsafe_isFlatArray();
520 case vmIntrinsics::_setMemory: return inline_unsafe_setMemory();
521 case vmIntrinsics::_getLength: return inline_native_getLength();
522 case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
523 case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
524 case vmIntrinsics::_equalsB: return inline_array_equals(StrIntrinsicNode::LL);
525 case vmIntrinsics::_equalsC: return inline_array_equals(StrIntrinsicNode::UU);
526 case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
527 case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
528 case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());
529
530 case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
531 case vmIntrinsics::_newArray: return inline_unsafe_newArray(false);
532 case vmIntrinsics::_newNullRestrictedArray: return inline_newNullRestrictedArray();
533
534 case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();
535
536 case vmIntrinsics::_isInstance:
537 case vmIntrinsics::_getModifiers:
538 case vmIntrinsics::_isInterface:
539 case vmIntrinsics::_isArray:
540 case vmIntrinsics::_isPrimitive:
541 case vmIntrinsics::_isHidden:
542 case vmIntrinsics::_getSuperclass:
543 case vmIntrinsics::_getClassAccessFlags: return inline_native_Class_query(intrinsic_id());
544
545 case vmIntrinsics::_floatToRawIntBits:
546 case vmIntrinsics::_floatToIntBits:
547 case vmIntrinsics::_intBitsToFloat:
548 case vmIntrinsics::_doubleToRawLongBits:
549 case vmIntrinsics::_doubleToLongBits:
550 case vmIntrinsics::_longBitsToDouble:
551 case vmIntrinsics::_floatToFloat16:
552 case vmIntrinsics::_float16ToFloat: return inline_fp_conversions(intrinsic_id());
2244 case vmIntrinsics::_remainderUnsigned_l: {
2245 zero_check_long(argument(2));
2246 // Compile-time detection of a division by zero
2247 if (stopped()) {
2248 return true; // keep the graph constructed so far
2249 }
2250 n = new UModLNode(control(), argument(0), argument(2));
2251 break;
2252 }
2253 default: fatal_unexpected_iid(id); break;
2254 }
2255 set_result(_gvn.transform(n));
2256 return true;
2257 }
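// For illustration: Long.remainderUnsigned(x, 0L) must throw ArithmeticException,
// so the zero check above guards the divisor. If the divisor is a compile-time
// zero constant, the graph is stopped at that point and only the exception path
// remains.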
2258
2259 //----------------------------inline_unsafe_access----------------------------
2260
2261 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2262 // Attempt to infer a sharper value type from the offset and base type.
2263 ciKlass* sharpened_klass = nullptr;
2264 bool null_free = false;
2265
2266 // See if it is an instance field, with an object type.
2267 if (alias_type->field() != nullptr) {
2268 if (alias_type->field()->type()->is_klass()) {
2269 sharpened_klass = alias_type->field()->type()->as_klass();
2270 null_free = alias_type->field()->is_null_free();
2271 }
2272 }
2273
2274 const TypeOopPtr* result = nullptr;
2275 // See if it is a narrow oop array.
2276 if (adr_type->isa_aryptr()) {
2277 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2278 const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2279 null_free = adr_type->is_aryptr()->is_null_free();
2280 if (elem_type != nullptr && elem_type->is_loaded()) {
2281 // Sharpen the value type.
2282 result = elem_type;
2283 }
2284 }
2285 }
2286
2287 // The sharpened class might be unloaded if there is no class loader
2288 // constraint in place.
2289 if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2290 // Sharpen the value type.
2291 result = TypeOopPtr::make_from_klass(sharpened_klass);
2292 if (null_free) {
2293 result = result->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2294 }
2295 }
2296 if (result != nullptr) {
2297 #ifndef PRODUCT
2298 if (C->print_intrinsics() || C->print_inlining()) {
2299 tty->print(" from base type: "); adr_type->dump(); tty->cr();
2300 tty->print(" sharpened value: "); result->dump(); tty->cr();
2301 }
2302 #endif
2303 }
2304 return result;
2305 }
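// For illustration: a hypothetical Unsafe.getReference(holder, NAME_OFFSET)
// that resolves to an instance field declared as `String name` starts out with
// the generic java.lang.Object value type; the field's declared type lets the
// helper above sharpen it to String (provided the klass is loaded).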
2306
2307 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2308 switch (kind) {
2309 case Relaxed:
2310 return MO_UNORDERED;
2311 case Opaque:
2312 return MO_RELAXED;
2313 case Acquire:
2314 return MO_ACQUIRE;
2315 case Release:
2316 return MO_RELEASE;
2317 case Volatile:
2318 return MO_SEQ_CST;
2319 default:
2320 ShouldNotReachHere();
2321 return 0;
2322 }
2323 }
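// For illustration, the access kinds correspond to the Unsafe entry points
// roughly as follows (sketch; the dispatch happens earlier in this file):
//   Unsafe.getInt          -> Relaxed  -> MO_UNORDERED
//   Unsafe.getIntOpaque    -> Opaque   -> MO_RELAXED
//   Unsafe.getIntAcquire   -> Acquire  -> MO_ACQUIRE
//   Unsafe.putIntRelease   -> Release  -> MO_RELEASE
//   Unsafe.getIntVolatile  -> Volatile -> MO_SEQ_CST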
2324
2325 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned, const bool is_flat) {
2326 if (callee()->is_static()) return false; // caller must have the capability!
2327 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2328 guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2329 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2330 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2331
2332 if (is_reference_type(type)) {
2333 decorators |= ON_UNKNOWN_OOP_REF;
2334 }
2335
2336 if (unaligned) {
2337 decorators |= C2_UNALIGNED;
2338 }
2339
2340 #ifndef PRODUCT
2341 {
2342 ResourceMark rm;
2343 // Check the signatures.
2344 ciSignature* sig = callee()->signature();
2345 #ifdef ASSERT
2346 if (!is_store) {
2347 // Object getReference(Object base, int/long offset), etc.
2348 BasicType rtype = sig->return_type()->basic_type();
2349 assert(rtype == type, "getter must return the expected value");
2350 assert(sig->count() == 2 || (is_flat && sig->count() == 3), "oop getter has 2 or 3 arguments");
2351 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2352 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2353 } else {
2354 // void putReference(Object base, int/long offset, Object x), etc.
2355 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2356 assert(sig->count() == 3 || (is_flat && sig->count() == 4), "oop putter has 3 arguments");
2357 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2358 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2359 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2360 assert(vtype == type, "putter must accept the expected value");
2361 }
2362 #endif // ASSERT
2363 }
2364 #endif //PRODUCT
2365
2366 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2367
2368 Node* receiver = argument(0); // type: oop
2369
2370 // Build address expression.
2371 Node* heap_base_oop = top();
2372
2373 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2374 Node* base = argument(1); // type: oop
2375 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2376 Node* offset = argument(2); // type: long
2377 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2378 // to be plain byte offsets, which are also the same as those accepted
2379 // by oopDesc::field_addr.
2380 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2381 "fieldOffset must be byte-scaled");
2382
2383 ciInlineKlass* inline_klass = nullptr;
2384 if (is_flat) {
2385 const TypeInstPtr* cls = _gvn.type(argument(4))->isa_instptr();
2386 if (cls == nullptr || cls->const_oop() == nullptr) {
2387 return false;
2388 }
2389 ciType* mirror_type = cls->const_oop()->as_instance()->java_mirror_type();
2390 if (!mirror_type->is_inlinetype()) {
2391 return false;
2392 }
2393 inline_klass = mirror_type->as_inline_klass();
2394 }
2395
2396 if (base->is_InlineType()) {
2397 InlineTypeNode* vt = base->as_InlineType();
2398 if (is_store) {
2399 if (!vt->is_allocated(&_gvn)) {
2400 return false;
2401 }
2402 base = vt->get_oop();
2403 } else {
2404 if (offset->is_Con()) {
2405 long off = find_long_con(offset, 0);
2406 ciInlineKlass* vk = vt->type()->inline_klass();
2407 if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2408 return false;
2409 }
2410
2411 ciField* field = vk->get_non_flat_field_by_offset(off);
2412 if (field != nullptr) {
2413 BasicType bt = type2field[field->type()->basic_type()];
2414 if (bt == T_ARRAY || bt == T_NARROWOOP) {
2415 bt = T_OBJECT;
2416 }
2417 if (bt == type && (!field->is_flat() || field->type() == inline_klass)) {
2418 Node* value = vt->field_value_by_offset(off, false);
2419 if (value->is_InlineType()) {
2420 value = value->as_InlineType()->adjust_scalarization_depth(this);
2421 }
2422 set_result(value);
2423 return true;
2424 }
2425 }
2426 }
2427 {
2428 // Re-execute the unsafe access if allocation triggers deoptimization.
2429 PreserveReexecuteState preexecs(this);
2430 jvms()->set_should_reexecute(true);
2431 vt = vt->buffer(this);
2432 }
2433 base = vt->get_oop();
2434 }
2435 }
2436
2437 // 32-bit machines ignore the high half!
2438 offset = ConvL2X(offset);
2439
2440 // Save state and restore on bailout
2441 uint old_sp = sp();
2442 SafePointNode* old_map = clone_map();
2443
2444 Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2445 assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2446
2447 if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2448 if (type != T_OBJECT && (inline_klass == nullptr || !inline_klass->has_object_fields())) {
2449 decorators |= IN_NATIVE; // off-heap primitive access
2450 } else {
2451 set_map(old_map);
2452 set_sp(old_sp);
2453 return false; // off-heap oop accesses are not supported
2454 }
2455 } else {
2456 heap_base_oop = base; // on-heap or mixed access
2457 }
2458
2459 // Can base be null? Otherwise, always on-heap access.
2460 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2461
2462 if (!can_access_non_heap) {
2463 decorators |= IN_HEAP;
2464 }
2465
2466 Node* val = is_store ? argument(4 + (is_flat ? 1 : 0)) : nullptr;
2467
2468 const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2469 if (adr_type == TypePtr::NULL_PTR) {
2470 set_map(old_map);
2471 set_sp(old_sp);
2472 return false; // off-heap access with zero address
2473 }
2474
2475 // Try to categorize the address.
2476 Compile::AliasType* alias_type = C->alias_type(adr_type);
2477 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2478
2479 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2480 alias_type->adr_type() == TypeAryPtr::RANGE) {
2481 set_map(old_map);
2482 set_sp(old_sp);
2483 return false; // not supported
2484 }
2485
2486 bool mismatched = false;
2487 BasicType bt = T_ILLEGAL;
2488 ciField* field = nullptr;
2489 if (adr_type->isa_instptr()) {
2490 const TypeInstPtr* instptr = adr_type->is_instptr();
2491 ciInstanceKlass* k = instptr->instance_klass();
2492 int off = instptr->offset();
2493 if (instptr->const_oop() != nullptr &&
2494 k == ciEnv::current()->Class_klass() &&
2495 instptr->offset() >= (k->size_helper() * wordSize)) {
2496 k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2497 field = k->get_field_by_offset(off, true);
2498 } else {
2499 field = k->get_non_flat_field_by_offset(off);
2500 }
2501 if (field != nullptr) {
2502 bt = type2field[field->type()->basic_type()];
2503 }
2504 assert(bt == alias_type->basic_type() || is_flat, "should match");
2505 } else {
2506 bt = alias_type->basic_type();
2507 }
2508
2509 if (bt != T_ILLEGAL) {
2510 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2511 if (bt == T_BYTE && adr_type->isa_aryptr()) {
2512 // Alias type doesn't differentiate between byte[] and boolean[].
2513 // Use address type to get the element type.
2514 bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2515 }
2516 if (is_reference_type(bt, true)) {
2517 // accessing an array field with getReference is not a mismatch
2518 bt = T_OBJECT;
2519 }
2520 if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2521 // Don't intrinsify mismatched object accesses
2522 set_map(old_map);
2523 set_sp(old_sp);
2524 return false;
2525 }
2526 mismatched = (bt != type);
2527 } else if (alias_type->adr_type()->isa_oopptr()) {
2528 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2529 }
2530
2531 if (is_flat) {
2532 if (adr_type->isa_instptr()) {
2533 if (field == nullptr || field->type() != inline_klass) {
2534 mismatched = true;
2535 }
2536 } else if (adr_type->isa_aryptr()) {
2537 const Type* elem = adr_type->is_aryptr()->elem();
2538 if (!adr_type->is_flat() || elem->inline_klass() != inline_klass) {
2539 mismatched = true;
2540 }
2541 } else {
2542 mismatched = true;
2543 }
2544 if (is_store) {
2545 const Type* val_t = _gvn.type(val);
2546 if (!val_t->is_inlinetypeptr() || val_t->inline_klass() != inline_klass) {
2547 set_map(old_map);
2548 set_sp(old_sp);
2549 return false;
2550 }
2551 }
2552 }
2553
2554 destruct_map_clone(old_map);
2555 assert(!mismatched || is_flat || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2556
2557 if (mismatched) {
2558 decorators |= C2_MISMATCHED;
2559 }
2560
2561 // First guess at the value type.
2562 const Type *value_type = Type::get_const_basic_type(type);
2563
2564 // Figure out the memory ordering.
2565 decorators |= mo_decorator_for_access_kind(kind);
2566
2567 if (!is_store) {
2568 if (type == T_OBJECT && !is_flat) {
2569 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2570 if (tjp != nullptr) {
2571 value_type = tjp;
2572 }
2573 }
2574 }
2575
2576 receiver = null_check(receiver);
2577 if (stopped()) {
2578 return true;
2579 }
2580 // Heap pointers get a null-check from the interpreter,
2581 // as a courtesy. However, this is not guaranteed by Unsafe,
2582 // and it is not possible to fully distinguish unintended nulls
2583 // from intended ones in this API.
2584
2585 if (!is_store) {
2586 Node* p = nullptr;
2587 // Try to constant fold a load from a constant field
2588
2589 if (heap_base_oop != top() && field != nullptr && field->is_constant() && !field->is_flat() && !mismatched) {
2590 // final or stable field
2591 p = make_constant_from_field(field, heap_base_oop);
2592 }
2593
2594 if (p == nullptr) { // Could not constant fold the load
2595 if (is_flat) {
2596 if (adr_type->isa_instptr() && !mismatched) {
2597 ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2598 int offset = adr_type->is_instptr()->offset();
2599 p = InlineTypeNode::make_from_flat(this, inline_klass, base, base, holder, offset, decorators);
2600 } else {
2601 p = InlineTypeNode::make_from_flat(this, inline_klass, base, adr, nullptr, 0, decorators);
2602 }
2603 } else {
2604 p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2605 const TypeOopPtr* ptr = value_type->make_oopptr();
2606 if (ptr != nullptr && ptr->is_inlinetypeptr()) {
2607 // Load a non-flattened inline type from memory
2608 p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass(), !ptr->maybe_null());
2609 }
2610 }
2611 // Normalize the value returned by getBoolean in the following cases
2612 if (type == T_BOOLEAN &&
2613 (mismatched ||
2614 heap_base_oop == top() || // - heap_base_oop is null or
2615 (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2616 // and the unsafe access is made at a large offset
2617 // (i.e., larger than the maximum offset necessary for any
2618 // field access)
2619 ) {
2620 IdealKit ideal = IdealKit(this);
2621 #define __ ideal.
2622 IdealVariable normalized_result(ideal);
2623 __ declarations_done();
2624 __ set(normalized_result, p);
2625 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2626 __ set(normalized_result, ideal.ConI(1));
2627 ideal.end_if();
2628 final_sync(ideal);
2629 p = __ value(normalized_result);
2630 #undef __
2631 }
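// For illustration: a mismatched read such as Unsafe.getBoolean over a byte
// field holding the value 2 would otherwise surface the raw byte; the IdealKit
// branch above normalizes any non-zero value to 1, so the Java-level result is
// a canonical true/false.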
2632 }
2633 if (type == T_ADDRESS) {
2634 p = gvn().transform(new CastP2XNode(nullptr, p));
2635 p = ConvX2UL(p);
2636 }
2637 // The load node has the control of the preceding MemBarCPUOrder. All
2638 // following nodes will have the control of the MemBarCPUOrder inserted at
2639 // the end of this method. So, pushing the load onto the stack at a later
2640 // point is fine.
2641 set_result(p);
2642 } else {
2643 if (bt == T_ADDRESS) {
2644 // Repackage the long as a pointer.
2645 val = ConvL2X(val);
2646 val = gvn().transform(new CastX2PNode(val));
2647 }
2648 if (is_flat) {
2649 if (adr_type->isa_instptr() && !mismatched) {
2650 ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2651 int offset = adr_type->is_instptr()->offset();
2652 val->as_InlineType()->store_flat(this, base, base, holder, offset, decorators);
2653 } else {
2654 val->as_InlineType()->store_flat(this, base, adr, nullptr, 0, decorators);
2655 }
2656 } else {
2657 access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2658 }
2659 }
2660
2661 if (argument(1)->is_InlineType() && is_store) {
2662 InlineTypeNode* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(argument(1))->inline_klass());
2663 value = value->make_larval(this, false);
2664 replace_in_map(argument(1), value);
2665 }
2666
2667 return true;
2668 }
2669
2670 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2671 Node* receiver = argument(0);
2672 Node* value = argument(1);
2673 if (!value->is_InlineType()) {
2674 return false;
2675 }
2676
2677 receiver = null_check(receiver);
2678 if (stopped()) {
2679 return true;
2680 }
2681
2682 set_result(value->as_InlineType()->make_larval(this, true));
2683 return true;
2684 }
2685
2686 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2687 Node* receiver = argument(0);
2688 Node* buffer = argument(1);
2689 if (!buffer->is_InlineType()) {
2690 return false;
2691 }
2692 InlineTypeNode* vt = buffer->as_InlineType();
2693 if (!vt->is_allocated(&_gvn)) {
2694 return false;
2695 }
2696 // TODO 8239003 Why is this needed?
2697 if (AllocateNode::Ideal_allocation(vt->get_oop()) == nullptr) {
2698 return false;
2699 }
2700
2701 receiver = null_check(receiver);
2702 if (stopped()) {
2703 return true;
2704 }
2705
2706 set_result(vt->finish_larval(this));
2707 return true;
2708 }
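// For illustration (sketch of the larval buffer protocol these two intrinsics
// support, assuming the Valhalla Unsafe API shape; MyValue and X_OFFSET are
// hypothetical names used only for the sketch):
//   MyValue v = ...;
//   v = U.makePrivateBuffer(v);     // returns a private, mutable (larval) copy
//   U.putInt(v, X_OFFSET, 42);      // field writes go into the buffer
//   v = U.finishPrivateBuffer(v);   // publishes the buffer as an ordinary value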
2709
2710 //----------------------------inline_unsafe_load_store----------------------------
2711 // This method serves a couple of different customers (depending on LoadStoreKind):
2712 //
2713 // LS_cmp_swap:
2714 //
2715 // boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2716 // boolean compareAndSetInt( Object o, long offset, int expected, int x);
2717 // boolean compareAndSetLong( Object o, long offset, long expected, long x);
2718 //
2719 // LS_cmp_swap_weak:
2720 //
2721 // boolean weakCompareAndSetReference( Object o, long offset, Object expected, Object x);
2722 // boolean weakCompareAndSetReferencePlain( Object o, long offset, Object expected, Object x);
2723 // boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2724 // boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2725 //
2726 // boolean weakCompareAndSetInt( Object o, long offset, int expected, int x);
2727 // boolean weakCompareAndSetIntPlain( Object o, long offset, int expected, int x);
2728 // boolean weakCompareAndSetIntAcquire( Object o, long offset, int expected, int x);
2729 // boolean weakCompareAndSetIntRelease( Object o, long offset, int expected, int x);
2895 }
2896 case LS_cmp_swap:
2897 case LS_cmp_swap_weak:
2898 case LS_get_add:
2899 break;
2900 default:
2901 ShouldNotReachHere();
2902 }
2903
2904 // Null check receiver.
2905 receiver = null_check(receiver);
2906 if (stopped()) {
2907 return true;
2908 }
2909
2910 int alias_idx = C->get_alias_index(adr_type);
2911
2912 if (is_reference_type(type)) {
2913 decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2914
2915 if (oldval != nullptr && oldval->is_InlineType()) {
2916 // Re-execute the unsafe access if allocation triggers deoptimization.
2917 PreserveReexecuteState preexecs(this);
2918 jvms()->set_should_reexecute(true);
2919 oldval = oldval->as_InlineType()->buffer(this)->get_oop();
2920 }
2921 if (newval != nullptr && newval->is_InlineType()) {
2922 // Re-execute the unsafe access if allocation triggers deoptimization.
2923 PreserveReexecuteState preexecs(this);
2924 jvms()->set_should_reexecute(true);
2925 newval = newval->as_InlineType()->buffer(this)->get_oop();
2926 }
2927
2928 // Transformation of a value which could be null pointer (CastPP #null)
2929 // could be delayed during Parse (for example, in adjust_map_after_if()).
2930 // Execute transformation here to avoid barrier generation in such case.
2931 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2932 newval = _gvn.makecon(TypePtr::NULL_PTR);
2933
2934 if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2935 // Refine the value to a null constant, when it is known to be null
2936 oldval = _gvn.makecon(TypePtr::NULL_PTR);
2937 }
2938 }
2939
2940 Node* result = nullptr;
2941 switch (kind) {
2942 case LS_cmp_exchange: {
2943 result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2944 oldval, newval, value_type, type, decorators);
2945 break;
2946 }
2947 case LS_cmp_swap_weak:
3094 Deoptimization::Action_make_not_entrant);
3095 }
3096 if (stopped()) {
3097 return true;
3098 }
3099 #endif //INCLUDE_JVMTI
3100
3101 Node* test = nullptr;
3102 if (LibraryCallKit::klass_needs_init_guard(kls)) {
3103 // Note: The argument might still be an illegal value like
3104 // Serializable.class or Object[].class. The runtime will handle it.
3105 // But we must make an explicit check for initialization.
3106 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3107 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3108 // can generate code to load it as an unsigned byte.
3109 Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
3110 Node* bits = intcon(InstanceKlass::fully_initialized);
3111 test = _gvn.transform(new SubINode(inst, bits));
3112 // The 'test' is non-zero if we need to take a slow path.
3113 }
3114 Node* obj = nullptr;
3115 const TypeInstKlassPtr* tkls = _gvn.type(kls)->isa_instklassptr();
3116 if (tkls != nullptr && tkls->instance_klass()->is_inlinetype()) {
3117 obj = InlineTypeNode::make_default(_gvn, tkls->instance_klass()->as_inline_klass())->buffer(this);
3118 } else {
3119 obj = new_instance(kls, test);
3120 }
3121 set_result(obj);
3122 return true;
3123 }
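// For illustration: Unsafe.allocateInstance(Foo.class), with Foo a hypothetical
// application class, may run before Foo's static initializer has completed; the
// init-state test built above sends such callers to the slow path, while a class
// already known to be fully initialized needs no guard at all.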
3124
3125 //------------------------inline_native_time_funcs--------------
3126 // inline code for System.currentTimeMillis() and System.nanoTime()
3127 // these have the same type and signature
3128 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3129 const TypeFunc* tf = OptoRuntime::void_long_Type();
3130 const TypePtr* no_memory_effects = nullptr;
3131 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3132 Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3133 #ifdef ASSERT
3134 Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3135 assert(value_top == top(), "second value must be top");
3136 #endif
3137 set_result(value);
3138 return true;
3139 }
3140
3896
3897 //------------------------inline_native_setCurrentThread------------------
3898 bool LibraryCallKit::inline_native_setCurrentThread() {
3899 assert(C->method()->changes_current_thread(),
3900 "method changes current Thread but is not annotated ChangesCurrentThread");
3901 Node* arr = argument(1);
3902 Node* thread = _gvn.transform(new ThreadLocalNode());
3903 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3904 Node* thread_obj_handle
3905 = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3906 thread_obj_handle = _gvn.transform(thread_obj_handle);
3907 const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3908 access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3909 JFR_ONLY(extend_setCurrentThread(thread, arr);)
3910 return true;
3911 }
3912
3913 const Type* LibraryCallKit::scopedValueCache_type() {
3914 ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3915 const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3916 const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS, /* stable= */ false, /* flat= */ false, /* not_flat= */ true, /* not_null_free= */ true);
3917
3918 // Because we create the scopedValue cache lazily we have to make the
3919 // type of the result BotPTR.
3920 bool xk = etype->klass_is_exact();
3921 const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, TypeAryPtr::Offset(0));
3922 return objects_type;
3923 }
3924
3925 Node* LibraryCallKit::scopedValueCache_helper() {
3926 Node* thread = _gvn.transform(new ThreadLocalNode());
3927 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3928 // We cannot use immutable_memory() because we might flip onto a
3929 // different carrier thread, at which point we'll need to use that
3930 // carrier thread's cache.
3931 // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3932 // TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3933 return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3934 }
3935
3936 //------------------------inline_native_scopedValueCache------------------
3937 bool LibraryCallKit::inline_native_scopedValueCache() {
3938 Node* cache_obj_handle = scopedValueCache_helper();
3939 const Type* objects_type = scopedValueCache_type();
3940 set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3941
4024 }
4025
4026 // Result of top level CFG and Memory.
4027 RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
4028 record_for_igvn(result_rgn);
4029 PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
4030 record_for_igvn(result_mem);
4031
4032 result_rgn->init_req(_true_path, _gvn.transform(valid_pin_count));
4033 result_rgn->init_req(_false_path, _gvn.transform(continuation_is_null));
4034 result_mem->init_req(_true_path, _gvn.transform(updated_pin_count_memory));
4035 result_mem->init_req(_false_path, _gvn.transform(input_memory_state));
4036
4037 // Set output state.
4038 set_control(_gvn.transform(result_rgn));
4039 set_all_memory(_gvn.transform(result_mem));
4040
4041 return true;
4042 }
4043
4044 //-----------------------load_klass_from_mirror_common-------------------------
4045 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
4046 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
4047 // and branch to the given path on the region.
4048 // If never_see_null, take an uncommon trap on null, so we can optimistically
4049 // compile for the non-null case.
4050 // If the region is null, force never_see_null = true.
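// For illustration: String.class carries a non-null klass pointer, so the load
// below yields the InstanceKlass for String and execution falls through;
// int.class is a primitive mirror whose klass field is null, so control is
// routed to the region's null path (or to an uncommon trap if never_see_null).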
4051 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
4052 bool never_see_null,
4053 RegionNode* region,
4054 int null_path,
4055 int offset) {
4056 if (region == nullptr) never_see_null = true;
4057 Node* p = basic_plus_adr(mirror, offset);
4058 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4059 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
4060 Node* null_ctl = top();
4061 kls = null_check_oop(kls, &null_ctl, never_see_null);
4062 if (region != nullptr) {
4063 // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
4067 }
4068 return kls;
4069 }
4070
4071 //--------------------(inline_native_Class_query helpers)---------------------
4072 // Use this for JVM_ACC_INTERFACE.
4073 // Fall through if (mods & mask) == bits, take the guard otherwise.
4074 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
4075 ByteSize offset, const Type* type, BasicType bt) {
4076 // Branch around if the given klass has the given modifier bit set.
4077 // Like generate_guard, adds a new path onto the region.
4078 Node* modp = basic_plus_adr(kls, in_bytes(offset));
4079 Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
4080 Node* mask = intcon(modifier_mask);
4081 Node* bits = intcon(modifier_bits);
4082 Node* mbit = _gvn.transform(new AndINode(mods, mask));
4083 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
4084 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
4085 return generate_fair_guard(bol, region);
4086 }
4087
4088 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
4089 return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
4090 Klass::access_flags_offset(), TypeInt::INT, T_INT);
4091 }
4092
4093 // Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
4094 Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
4095 return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
4096 Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
4097 }
4098
4099 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
4100 return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
4101 }
4102
4103 //-------------------------inline_native_Class_query-------------------
4104 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
4105 const Type* return_type = TypeInt::BOOL;
4106 Node* prim_return_value = top(); // what happens if it's a primitive class?
4107 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4269
4270 case vmIntrinsics::_getClassAccessFlags:
4271 p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
4272 query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
4273 break;
4274
4275 default:
4276 fatal_unexpected_iid(id);
4277 break;
4278 }
4279
4280 // Fall-through is the normal case of a query to a real class.
4281 phi->init_req(1, query_value);
4282 region->init_req(1, control());
4283
4284 C->set_has_split_ifs(true); // Has chance for split-if optimization
4285 set_result(region, phi);
4286 return true;
4287 }
4288
4289
4290 //-------------------------inline_Class_cast-------------------
4291 bool LibraryCallKit::inline_Class_cast() {
4292 Node* mirror = argument(0); // Class
4293 Node* obj = argument(1);
4294 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4295 if (mirror_con == nullptr) {
4296 return false; // dead path (mirror->is_top()).
4297 }
4298 if (obj == nullptr || obj->is_top()) {
4299 return false; // dead path
4300 }
4301 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4302
4303 // First, see if Class.cast() can be folded statically.
4304 // java_mirror_type() returns non-null for compile-time Class constants.
4305 bool is_null_free_array = false;
4306 ciType* tm = mirror_con->java_mirror_type(&is_null_free_array);
4307 if (tm != nullptr && tm->is_klass() &&
4308 tp != nullptr) {
4309 if (!tp->is_loaded()) {
4310 // Don't use intrinsic when class is not loaded.
4311 return false;
4312 } else {
4313 const TypeKlassPtr* tklass = TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces);
4314 if (is_null_free_array) {
4315 tklass = tklass->is_aryklassptr()->cast_to_null_free();
4316 }
4317 int static_res = C->static_subtype_check(tklass, tp->as_klass_type());
4318 if (static_res == Compile::SSC_always_true) {
4319 // isInstance() is true - fold the code.
4320 set_result(obj);
4321 return true;
4322 } else if (static_res == Compile::SSC_always_false) {
4323 // Don't use intrinsic, have to throw ClassCastException.
4324 // If the reference is null, the non-intrinsic bytecode will
4325 // be optimized appropriately.
4326 return false;
4327 }
4328 }
4329 }
4330
4331 // Bailout intrinsic and do normal inlining if exception path is frequent.
4332 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4333 return false;
4334 }
4335
4336 // Generate dynamic checks.
4337   // Class.cast() is the Java implementation of the _checkcast bytecode.
4338 // Do checkcast (Parse::do_checkcast()) optimizations here.
4339
4340 mirror = null_check(mirror);
4341 // If mirror is dead, only null-path is taken.
4342 if (stopped()) {
4343 return true;
4344 }
4345
4346   // Not a subtype, or the mirror's klass pointer is nullptr (in which case the mirror denotes a primitive).
4347 enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
4348 RegionNode* region = new RegionNode(PATH_LIMIT);
4349 record_for_igvn(region);
4350
4351 // Now load the mirror's klass metaobject, and null-check it.
4352 // If kls is null, we have a primitive mirror and
4353 // nothing is an instance of a primitive type.
4354 Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4355
4356 Node* res = top();
4357 Node* io = i_o();
4358 Node* mem = merged_memory();
4359 if (!stopped()) {
4360
4361 Node* bad_type_ctrl = top();
4362 // Do checkcast optimizations.
4363 res = gen_checkcast(obj, kls, &bad_type_ctrl);
4364 region->init_req(_bad_type_path, bad_type_ctrl);
4365 }
4366 if (region->in(_prim_path) != top() ||
4367 region->in(_bad_type_path) != top() ||
4368 region->in(_npe_path) != top()) {
4369 // Let Interpreter throw ClassCastException.
4370 PreserveJVMState pjvms(this);
4371 set_control(_gvn.transform(region));
4372 // Set IO and memory because gen_checkcast may override them when buffering inline types
4373 set_i_o(io);
4374 set_all_memory(mem);
4375 uncommon_trap(Deoptimization::Reason_intrinsic,
4376 Deoptimization::Action_maybe_recompile);
4377 }
4378 if (!stopped()) {
4379 set_result(res);
4380 }
4381 return true;
4382 }
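// For orientation, a sketch of the Java-level behavior being intrinsified
// (java.lang.Class.cast, paraphrased):
//   public T cast(Object obj) {
//     if (obj != null && !isInstance(obj))
//       throw new ClassCastException(cannotCastMsg(obj));
//     return (T) obj;
//   }
// The intrinsic folds the isInstance test into gen_checkcast() above and routes
// the exceptional paths (primitive mirror, bad type, NPE) to an uncommon trap.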
4383
4384
4385 //--------------------------inline_native_subtype_check------------------------
4386 // This intrinsic takes the JNI calls out of the heart of
4387 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4388 bool LibraryCallKit::inline_native_subtype_check() {
4389 // Pull both arguments off the stack.
4390 Node* args[2]; // two java.lang.Class mirrors: superc, subc
4391 args[0] = argument(0);
4392 args[1] = argument(1);
4393 Node* klasses[2]; // corresponding Klasses: superk, subk
4394 klasses[0] = klasses[1] = top();
4395
4396 enum {
4397 // A full decision tree on {superc is prim, subc is prim}:
4398 _prim_0_path = 1, // {P,N} => false
4399 // {P,P} & superc!=subc => false
4400 _prim_same_path, // {P,P} & superc==subc => true
4401 _prim_1_path, // {N,P} => false
4402 _ref_subtype_path, // {N,N} & subtype check wins => true
4403 _both_ref_path, // {N,N} & subtype check loses => false
4404 PATH_LIMIT
4405 };
4406
4407 RegionNode* region = new RegionNode(PATH_LIMIT);
4408 RegionNode* prim_region = new RegionNode(2);
4409 Node* phi = new PhiNode(region, TypeInt::BOOL);
4410 record_for_igvn(region);
4411 record_for_igvn(prim_region);
4412
4413 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
4414 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4415 int class_klass_offset = java_lang_Class::klass_offset();
4416
4417 // First null-check both mirrors and load each mirror's klass metaobject.
4418 int which_arg;
4419 for (which_arg = 0; which_arg <= 1; which_arg++) {
4420 Node* arg = args[which_arg];
4421 arg = null_check(arg);
4422 if (stopped()) break;
4423 args[which_arg] = arg;
4424
4425 Node* p = basic_plus_adr(arg, class_klass_offset);
4426 Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
4427 klasses[which_arg] = _gvn.transform(kls);
4428 }
4429
4430 // Having loaded both klasses, test each for null.
4431 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4432 for (which_arg = 0; which_arg <= 1; which_arg++) {
4433 Node* kls = klasses[which_arg];
4434 Node* null_ctl = top();
4435 kls = null_check_oop(kls, &null_ctl, never_see_null);
4436 if (which_arg == 0) {
4437 prim_region->init_req(1, null_ctl);
4438 } else {
4439 region->init_req(_prim_1_path, null_ctl);
4440 }
4441 if (stopped()) break;
4442 klasses[which_arg] = kls;
4443 }
4444
4445 if (!stopped()) {
4446 // now we have two reference types, in klasses[0..1]
4447 Node* subk = klasses[1]; // the argument to isAssignableFrom
4448 Node* superk = klasses[0]; // the receiver
4449 region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4450 region->set_req(_ref_subtype_path, control());
4451 }
4452
4453 // If both operands are primitive (both klasses null), then
4454 // we must return true when they are identical primitives.
4455 // It is convenient to test this after the first null klass check.
4456 // This path is also used if superc is a value mirror.
4457 set_control(_gvn.transform(prim_region));
4458 if (!stopped()) {
4459 // Since superc is primitive, make a guard for the superc==subc case.
4460 Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4461 Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4462 generate_fair_guard(bol_eq, region);
4463 if (region->req() == PATH_LIMIT+1) {
4464 // A guard was added. If the added guard is taken, superc==subc.
4465 region->swap_edges(PATH_LIMIT, _prim_same_path);
4466 region->del_req(PATH_LIMIT);
4467 }
4468 region->set_req(_prim_0_path, control()); // Not equal after all.
4469 }
4470
4471 // these are the only paths that produce 'true':
4472 phi->set_req(_prim_same_path, intcon(1));
4473 phi->set_req(_ref_subtype_path, intcon(1));
4474
4475 // pull together the cases:
4476 assert(region->req() == PATH_LIMIT, "sane region");
4477 for (uint i = 1; i < region->req(); i++) {
4478 Node* ctl = region->in(i);
4479 if (ctl == nullptr || ctl == top()) {
4480 region->set_req(i, top());
4481 phi ->set_req(i, top());
4482 } else if (phi->in(i) == nullptr) {
4483 phi->set_req(i, intcon(0)); // all other paths produce 'false'
4484 }
4485 }
4486
4487 set_control(_gvn.transform(region));
4488 set_result(_gvn.transform(phi));
4489 return true;
4490 }
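// Summarizing the decision tree above: the result is true only on
// _prim_same_path (both mirrors denote the same primitive) and
// _ref_subtype_path (both are reference types and the subtype check succeeds);
// every other path produces false, which is the Class.isAssignableFrom contract
// this intrinsic serves.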
4491
4492 //---------------------generate_array_guard_common------------------------
4493 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {
4494
4495 if (stopped()) {
4496 return nullptr;
4497 }
4498
4499 // Like generate_guard, adds a new path onto the region.
4500 jint layout_con = 0;
4501 Node* layout_val = get_layout_helper(kls, layout_con);
4502 if (layout_val == nullptr) {
4503     bool query = false;
4504 switch(kind) {
4505 case ObjectArray: query = Klass::layout_helper_is_objArray(layout_con); break;
4506 case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
4507 case TypeArray: query = Klass::layout_helper_is_typeArray(layout_con); break;
4508 case AnyArray: query = Klass::layout_helper_is_array(layout_con); break;
4509 case NonArray: query = !Klass::layout_helper_is_array(layout_con); break;
4510 default:
4511 ShouldNotReachHere();
4512 }
4513 if (!query) {
4514 return nullptr; // never a branch
4515 } else { // always a branch
4516 Node* always_branch = control();
4517 if (region != nullptr)
4518 region->add_req(always_branch);
4519 set_control(top());
4520 return always_branch;
4521 }
4522 }
4523 unsigned int value = 0;
4524 BoolTest::mask btest = BoolTest::illegal;
4525 switch(kind) {
4526 case ObjectArray:
4527 case NonObjectArray: {
4528 value = Klass::_lh_array_tag_obj_value;
4529 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4530 btest = (kind == ObjectArray) ? BoolTest::eq : BoolTest::ne;
4531 break;
4532 }
4533 case TypeArray: {
4534 value = Klass::_lh_array_tag_type_value;
4535 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4536 btest = BoolTest::eq;
4537 break;
4538 }
4539 case AnyArray: value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
4540 case NonArray: value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
4541 default:
4542 ShouldNotReachHere();
4543 }
4544 // Now test the correct condition.
4545 jint nval = (jint)value;
4546 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4547 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4548 return generate_fair_guard(bol, region);
4549 }
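// Background for the tests above (see the layout helper encoding in klass.hpp):
// array klasses encode an array tag in the high bits of the layout helper and
// compare below _lh_neutral_value, which is why the AnyArray/NonArray cases use
// a signed lt/gt test against the neutral value, while the ObjectArray/TypeArray
// cases first shift the tag down and compare it for equality.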
4550
4551 //-----------------------inline_newNullRestrictedArray--------------------------
4552 // public static native Object[] newNullRestrictedArray(Class<?> componentType, int length);
4553 bool LibraryCallKit::inline_newNullRestrictedArray() {
4554 Node* componentType = argument(0);
4555 Node* length = argument(1);
4556
4557 const TypeInstPtr* tp = _gvn.type(componentType)->isa_instptr();
4558 if (tp != nullptr) {
4559 ciInstanceKlass* ik = tp->instance_klass();
4560 if (ik == C->env()->Class_klass()) {
4561 ciType* t = tp->java_mirror_type();
4562 if (t != nullptr && t->is_inlinetype()) {
4563 ciArrayKlass* array_klass = ciArrayKlass::make(t, true);
4564 if (array_klass->is_loaded() && array_klass->element_klass()->as_inline_klass()->is_initialized()) {
4565 const TypeAryKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass, Type::trust_interfaces)->is_aryklassptr();
4566 array_klass_type = array_klass_type->cast_to_null_free();
4567 Node* obj = new_array(makecon(array_klass_type), length, 0, nullptr, false); // no arguments to push
4568 set_result(obj);
4569 assert(gvn().type(obj)->is_aryptr()->is_null_free(), "must be null-free");
4570 return true;
4571 }
4572 }
4573 }
4574 }
4575 return false;
4576 }
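// In short: the intrinsic only applies when componentType is a compile-time
// constant Class of an initialized inline type, so the null-free array klass can
// be constant-folded into the allocation; otherwise we return false and let the
// native implementation handle the call.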
4577
4578 //-----------------------inline_native_newArray--------------------------
4579 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4580 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4581 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4582 Node* mirror;
4583 Node* count_val;
4584 if (uninitialized) {
4585 null_check_receiver();
4586 mirror = argument(1);
4587 count_val = argument(2);
4588 } else {
4589 mirror = argument(0);
4590 count_val = argument(1);
4591 }
4592
4593 mirror = null_check(mirror);
4594 // If mirror or obj is dead, only null-path is taken.
4595 if (stopped()) return true;
4596
4597 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4598 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4599 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4705 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4706 { PreserveReexecuteState preexecs(this);
4707 jvms()->set_should_reexecute(true);
4708
4709 array_type_mirror = null_check(array_type_mirror);
4710 original = null_check(original);
4711
4712 // Check if a null path was taken unconditionally.
4713 if (stopped()) return true;
4714
4715 Node* orig_length = load_array_length(original);
4716
4717 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4718 klass_node = null_check(klass_node);
4719
4720 RegionNode* bailout = new RegionNode(1);
4721 record_for_igvn(bailout);
4722
4723 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4724 // Bail out if that is so.
4725     // An inline type array may have object fields that would require a
4726     // write barrier. Conservatively, go to the slow path.
4727 // TODO 8251971: Optimize for the case when flat src/dst are later found
4728 // to not contain oops (i.e., move this check to the macro expansion phase).
4729 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4730 const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
4731 const TypeKlassPtr* tklass = _gvn.type(klass_node)->is_klassptr();
4732 bool exclude_flat = UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
4733 // Can src array be flat and contain oops?
4734 (orig_t == nullptr || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
4735 // Can dest array be flat and contain oops?
4736 tklass->can_be_inline_array() && (!tklass->is_flat() || tklass->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->as_inline_klass()->contains_oops());
4737 Node* not_objArray = exclude_flat ? generate_non_objArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
4738 if (not_objArray != nullptr) {
4739 // Improve the klass node's type from the new optimistic assumption:
4740 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4741 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
4742 Node* cast = new CastPPNode(control(), klass_node, akls);
4743 klass_node = _gvn.transform(cast);
4744 }
4745
4746 // Bail out if either start or end is negative.
4747 generate_negative_guard(start, bailout, &start);
4748 generate_negative_guard(end, bailout, &end);
4749
4750 Node* length = end;
4751 if (_gvn.type(start) != TypeInt::ZERO) {
4752 length = _gvn.transform(new SubINode(end, start));
4753 }
4754
4755 // Bail out if length is negative (i.e., if start > end).
4756     // Without this, new_array would throw a
4757     // NegativeArraySizeException, but an IllegalArgumentException is what
4758     // should be thrown here.
4759 generate_negative_guard(length, bailout, &length);
4760
4761 // Handle inline type arrays
4762 bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
4763 if (!stopped()) {
4764 // TODO JDK-8329224
4765 if (!orig_t->is_null_free()) {
4766 // Not statically known to be null free, add a check
4767 generate_fair_guard(null_free_array_test(original), bailout);
4768 }
4769 orig_t = _gvn.type(original)->isa_aryptr();
4770 if (orig_t != nullptr && orig_t->is_flat()) {
4771 // Src is flat, check that dest is flat as well
4772 if (exclude_flat) {
4773 // Dest can't be flat, bail out
4774 bailout->add_req(control());
4775 set_control(top());
4776 } else {
4777 generate_fair_guard(flat_array_test(klass_node, /* flat = */ false), bailout);
4778 }
4779 } else if (UseFlatArray && (orig_t == nullptr || !orig_t->is_not_flat()) &&
4780 // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
4781 ((!tklass->is_flat() && tklass->can_be_inline_array()) || !can_validate)) {
4782 // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
4783 // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
4784 generate_fair_guard(flat_array_test(load_object_klass(original)), bailout);
4785 if (orig_t != nullptr) {
4786 orig_t = orig_t->cast_to_not_flat();
4787 original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
4788 }
4789 }
4790 if (!can_validate) {
4791       // No validation. The subtype check emitted at macro expansion time will not go to the slow
4792       // path but will call checkcast_arraycopy, which cannot handle flat/null-free inline type arrays.
4793 // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
4794 generate_fair_guard(flat_array_test(klass_node), bailout);
4795 generate_fair_guard(null_free_array_test(original), bailout);
4796 }
4797 }
4798
4799 // Bail out if start is larger than the original length
4800 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4801 generate_negative_guard(orig_tail, bailout, &orig_tail);
4802
4803 if (bailout->req() > 1) {
4804 PreserveJVMState pjvms(this);
4805 set_control(_gvn.transform(bailout));
4806 uncommon_trap(Deoptimization::Reason_intrinsic,
4807 Deoptimization::Action_maybe_recompile);
4808 }
4809
4810 if (!stopped()) {
4811 // How many elements will we copy from the original?
4812 // The answer is MinI(orig_tail, length).
4813 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4814
4815 // Generate a direct call to the right arraycopy function(s).
4816 // We know the copy is disjoint but we might not know if the
4817 // oop stores need checking.
4818 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
4824 // to the copyOf to be validated, including that the copy to the
4825 // new array won't trigger an ArrayStoreException. That subtype
4826 // check can be optimized if we know something on the type of
4827 // the input array from type speculation.
4828 if (_gvn.type(klass_node)->singleton()) {
4829 const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4830 const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4831
4832 int test = C->static_subtype_check(superk, subk);
4833 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4834 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4835 if (t_original->speculative_type() != nullptr) {
4836 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4837 }
4838 }
4839 }
4840
4841 bool validated = false;
4842 // Reason_class_check rather than Reason_intrinsic because we
4843 // want to intrinsify even if this traps.
4844 if (can_validate) {
4845 Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4846
4847 if (not_subtype_ctrl != top()) {
4848 PreserveJVMState pjvms(this);
4849 set_control(not_subtype_ctrl);
4850 uncommon_trap(Deoptimization::Reason_class_check,
4851 Deoptimization::Action_make_not_entrant);
4852 assert(stopped(), "Should be stopped");
4853 }
4854 validated = true;
4855 }
4856
4857 if (!stopped()) {
4858 newcopy = new_array(klass_node, length, 0); // no arguments to push
4859
4860 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
4861 load_object_klass(original), klass_node);
4862 if (!is_copyOfRange) {
4863 ac->set_copyof(validated);
4864 } else {
4910
4911 //-----------------------generate_method_call----------------------------
4912 // Use generate_method_call to make a slow-call to the real
4913 // method if the fast path fails. An alternative would be to
4914 // use a stub like OptoRuntime::slow_arraycopy_Java.
4915 // This only works for expanding the current library call,
4916 // not another intrinsic. (E.g., don't use this for making an
4917 // arraycopy call inside of the copyOf intrinsic.)
4918 CallJavaNode*
4919 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4920 // When compiling the intrinsic method itself, do not use this technique.
4921 guarantee(callee() != C->method(), "cannot make slow-call to self");
4922
4923 ciMethod* method = callee();
4924 // ensure the JVMS we have will be correct for this call
4925 guarantee(method_id == method->intrinsic_id(), "must match");
4926
4927 const TypeFunc* tf = TypeFunc::make(method);
4928 if (res_not_null) {
4929 assert(tf->return_type() == T_OBJECT, "");
4930 const TypeTuple* range = tf->range_cc();
4931 const Type** fields = TypeTuple::fields(range->cnt());
4932 fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4933 const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4934 tf = TypeFunc::make(tf->domain_cc(), new_range);
4935 }
4936 CallJavaNode* slow_call;
4937 if (is_static) {
4938 assert(!is_virtual, "");
4939 slow_call = new CallStaticJavaNode(C, tf,
4940 SharedRuntime::get_resolve_static_call_stub(), method);
4941 } else if (is_virtual) {
4942 assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4943 int vtable_index = Method::invalid_vtable_index;
4944 if (UseInlineCaches) {
4945 // Suppress the vtable call
4946 } else {
4947       // hashCode and clone are not miranda methods,
4948 // so the vtable index is fixed.
4949 // No need to use the linkResolver to get it.
4950 vtable_index = method->vtable_index();
4951 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4952 "bad index %d", vtable_index);
4953 }
4954 slow_call = new CallDynamicJavaNode(tf,
4971 set_edges_for_java_call(slow_call);
4972 return slow_call;
4973 }
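// When res_not_null is set, the declared return type of the slow call is
// narrowed to NOTNULL (see the TypeFunc rebuild above), so callers of the slow
// path do not need to re-null-check a result that is known to be non-null.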
4974
4975
4976 /**
4977 * Build special case code for calls to hashCode on an object. This call may
4978 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4979 * slightly different code.
4980 */
4981 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4982 assert(is_static == callee()->is_static(), "correct intrinsic selection");
4983 assert(!(is_virtual && is_static), "either virtual, special, or static");
4984
4985 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4986
4987 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4988 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
4989 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4990 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4991 Node* obj = argument(0);
4992
4993 // Don't intrinsify hashcode on inline types for now.
4994   // The "is locked" runtime check below also serves as an inline type check and routes such objects to the slow path.
4995 if (gvn().type(obj)->is_inlinetypeptr()) {
4996 return false;
4997 }
4998
4999 if (!is_static) {
5000     // Check for hashing a null object
5001 obj = null_check_receiver();
5002 if (stopped()) return true; // unconditionally null
5003 result_reg->init_req(_null_path, top());
5004 result_val->init_req(_null_path, top());
5005 } else {
5006 // Do a null check, and return zero if null.
5007 // System.identityHashCode(null) == 0
5008 Node* null_ctl = top();
5009 obj = null_check_oop(obj, &null_ctl);
5010 result_reg->init_req(_null_path, null_ctl);
5011 result_val->init_req(_null_path, _gvn.intcon(0));
5012 }
5013
5014 // Unconditionally null? Then return right away.
5015 if (stopped()) {
5016 set_control( result_reg->in(_null_path));
5017 if (!stopped())
5018 set_result(result_val->in(_null_path));
5019 return true;
5020 }
5021
5022 // We only go to the fast case code if we pass a number of guards. The
5023 // paths which do not pass are accumulated in the slow_region.
5024 RegionNode* slow_region = new RegionNode(1);
5025 record_for_igvn(slow_region);
5026
5027 // If this is a virtual call, we generate a funny guard. We pull out
5028 // the vtable entry corresponding to hashCode() from the target object.
5029 // If the target method which we are calling happens to be the native
5030 // Object hashCode() method, we pass the guard. We do not need this
5031 // guard for non-virtual calls -- the caller is known to be the native
5032 // Object hashCode().
5033 if (is_virtual) {
5034 // After null check, get the object's klass.
5035 Node* obj_klass = load_object_klass(obj);
5036 generate_virtual_guard(obj_klass, slow_region);
5037 }
5038
5039 // Get the header out of the object, use LoadMarkNode when available
5040 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
5041 // The control of the load must be null. Otherwise, the load can move before
5042 // the null check after castPP removal.
5043 Node* no_ctrl = nullptr;
5044 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
5045
5046 if (!UseObjectMonitorTable) {
5047 // Test the header to see if it is safe to read w.r.t. locking.
5048     // This also serves as a guard against inline types
5049 Node *lock_mask = _gvn.MakeConX(markWord::inline_type_mask_in_place);
5050 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
5051 if (LockingMode == LM_LIGHTWEIGHT) {
5052 Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
5053 Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
5054 Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
5055
5056 generate_slow_guard(test_monitor, slow_region);
5057 } else {
5058 Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
5059 Node *chk_unlocked = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
5060 Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
5061
5062 generate_slow_guard(test_not_unlocked, slow_region);
5063 }
5064 }
5065
5066 // Get the hash value and check to see that it has been properly assigned.
5067 // We depend on hash_mask being at most 32 bits and avoid the use of
5068 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
5069   // VM: see markWord.hpp.
5104 // this->control() comes from set_results_for_java_call
5105 result_reg->init_req(_slow_path, control());
5106 result_val->init_req(_slow_path, slow_result);
5107 result_io ->set_req(_slow_path, i_o());
5108 result_mem ->set_req(_slow_path, reset_memory());
5109 }
5110
5111 // Return the combined state.
5112 set_i_o( _gvn.transform(result_io) );
5113 set_all_memory( _gvn.transform(result_mem));
5114
5115 set_result(result_reg, result_val);
5116 return true;
5117 }
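// In outline: the fast path reads the identity hash straight out of the mark
// word when the header says that is safe (unlocked, not an inline type) and a
// hash has already been installed; every failed guard lands in slow_region,
// which reaches the out-of-line hashCode/identityHashCode call, and the null
// path contributes 0 for the static (identityHashCode) variant.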
5118
5119 //---------------------------inline_native_getClass----------------------------
5120 // public final native Class<?> java.lang.Object.getClass();
5121 //
5122 // Build special case code for calls to getClass on an object.
5123 bool LibraryCallKit::inline_native_getClass() {
5124 Node* obj = argument(0);
5125 if (obj->is_InlineType()) {
5126 const Type* t = _gvn.type(obj);
5127 if (t->maybe_null()) {
5128 null_check(obj);
5129 }
5130 set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
5131 return true;
5132 }
5133 obj = null_check_receiver();
5134 if (stopped()) return true;
5135 set_result(load_mirror_from_klass(load_object_klass(obj)));
5136 return true;
5137 }
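// Note the inline type shortcut above: since the type of an InlineType node is
// known exactly, getClass() folds to a constant mirror (after an optional null
// check for nullable inline types); only the general case needs the klass load
// plus mirror load.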
5138
5139 //-----------------inline_native_Reflection_getCallerClass---------------------
5140 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
5141 //
5142 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
5143 //
5144 // NOTE: This code must perform the same logic as JVM_GetCallerClass
5145 // in that it must skip particular security frames and checks for
5146 // caller sensitive methods.
5147 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
5148 #ifndef PRODUCT
5149 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
5150 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
5151 }
5152 #endif
5153
5465 dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5466
5467 flags |= RC_NARROW_MEM; // narrow in memory
5468 }
5469
5470 // Call it. Note that the length argument is not scaled.
5471 make_runtime_call(flags,
5472 OptoRuntime::make_setmemory_Type(),
5473 StubRoutines::unsafe_setmemory(),
5474 "unsafe_setmemory",
5475 dst_type,
5476 dst_addr, size XTOP, byte);
5477
5478 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
5479
5480 return true;
5481 }
5482
5483 #undef XTOP
5484
5485 //----------------------inline_unsafe_isFlatArray------------------------
5486 // public native boolean Unsafe.isFlatArray(Class<?> arrayClass);
5487 // This intrinsic exploits assumptions made by the native implementation
5488 // (arrayClass is neither null nor primitive) to avoid unnecessary null checks.
5489 bool LibraryCallKit::inline_unsafe_isFlatArray() {
5490 Node* cls = argument(1);
5491 Node* p = basic_plus_adr(cls, java_lang_Class::klass_offset());
5492 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p,
5493 TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT));
5494 Node* result = flat_array_test(kls);
5495 set_result(result);
5496 return true;
5497 }
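// The mirror is trusted to be a non-null, non-primitive array class (see the
// assumptions noted above), so the klass load needs no null or primitive
// checks and the whole intrinsic reduces to a flat_array_test on the loaded klass.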
5498
5499 //------------------------copy_to_clone-----------------------------------
5500 // Helper function for inline_native_clone.
5501 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5502 assert(obj_size != nullptr, "");
5503 Node* raw_obj = alloc_obj->in(1);
5504 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5505
5506 AllocateNode* alloc = nullptr;
5507 if (ReduceBulkZeroing &&
5508 // If we are implementing an array clone without knowing its source type
5509 // (can happen when compiling the array-guarded branch of a reflective
5510 // Object.clone() invocation), initialize the array within the allocation.
5511 // This is needed because some GCs (e.g. ZGC) might fall back in this case
5512 // to a runtime clone call that assumes fully initialized source arrays.
5513 (!is_array || obj->get_ptr_type()->isa_aryptr() != nullptr)) {
5514 // We will be completely responsible for initializing this object -
5515 // mark Initialize node as complete.
5516 alloc = AllocateNode::Ideal_allocation(alloc_obj);
5517     // The object was just allocated - there should not be any stores yet!
5518 guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
5549 // not cloneable or finalizer => slow path to out-of-line Object.clone
5550 //
5551 // The general case has two steps, allocation and copying.
5552 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5553 //
5554 // Copying also has two cases, oop arrays and everything else.
5555 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5556 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5557 //
5558 // These steps fold up nicely if and when the cloned object's klass
5559 // can be sharply typed as an object array, a type array, or an instance.
5560 //
5561 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5562 PhiNode* result_val;
5563
5564 // Set the reexecute bit for the interpreter to reexecute
5565 // the bytecode that invokes Object.clone if deoptimization happens.
5566 { PreserveReexecuteState preexecs(this);
5567 jvms()->set_should_reexecute(true);
5568
5569 Node* obj = argument(0);
5570 obj = null_check_receiver();
5571 if (stopped()) return true;
5572
5573 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5574 if (obj_type->is_inlinetypeptr()) {
5575 // If the object to clone is an inline type, we can simply return it (i.e. a nop) since inline types have
5576 // no identity.
5577 set_result(obj);
5578 return true;
5579 }
5580
5581 // If we are going to clone an instance, we need its exact type to
5582 // know the number and types of fields to convert the clone to
5583 // loads/stores. Maybe a speculative type can help us.
5584 if (!obj_type->klass_is_exact() &&
5585 obj_type->speculative_type() != nullptr &&
5586 obj_type->speculative_type()->is_instance_klass() &&
5587 !obj_type->speculative_type()->is_inlinetype()) {
5588 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5589 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5590 !spec_ik->has_injected_fields()) {
5591 if (!obj_type->isa_instptr() ||
5592 obj_type->is_instptr()->instance_klass()->has_subklass()) {
5593 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5594 }
5595 }
5596 }
5597
5598 // Conservatively insert a memory barrier on all memory slices.
5599 // Do not let writes into the original float below the clone.
5600 insert_mem_bar(Op_MemBarCPUOrder);
5601
5602 // paths into result_reg:
5603 enum {
5604 _slow_path = 1, // out-of-line call to clone method (virtual or not)
5605 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
5606 _array_path, // plain array allocation, plus arrayof_long_arraycopy
5607 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
5608 PATH_LIMIT
5609 };
5610 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5611 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5612 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
5613 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5614 record_for_igvn(result_reg);
5615
5616 Node* obj_klass = load_object_klass(obj);
5617 // We only go to the fast case code if we pass a number of guards.
5618 // The paths which do not pass are accumulated in the slow_region.
5619 RegionNode* slow_region = new RegionNode(1);
5620 record_for_igvn(slow_region);
5621
5622 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
5623 if (array_ctl != nullptr) {
5624 // It's an array.
5625 PreserveJVMState pjvms(this);
5626 set_control(array_ctl);
5627
5628 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5629 const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
5630 if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
5631 obj_type->can_be_inline_array() &&
5632 (ary_ptr == nullptr || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
5633       // A flat inline type array may have object fields that would require a
5634       // write barrier. Conservatively, go to the slow path.
5635 generate_fair_guard(flat_array_test(obj_klass), slow_region);
5636 }
5637
5638 if (!stopped()) {
5639 Node* obj_length = load_array_length(obj);
5640 Node* array_size = nullptr; // Size of the array without object alignment padding.
5641 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5642
5643 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5644 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5645 // If it is an oop array, it requires very special treatment,
5646 // because gc barriers are required when accessing the array.
5647 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5648 if (is_obja != nullptr) {
5649 PreserveJVMState pjvms2(this);
5650 set_control(is_obja);
5651 // Generate a direct call to the right arraycopy function(s).
5652 // Clones are always tightly coupled.
5653 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5654 ac->set_clone_oop_array();
5655 Node* n = _gvn.transform(ac);
5656 assert(n == ac, "cannot disappear");
5657 ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5658
5659 result_reg->init_req(_objArray_path, control());
5660 result_val->init_req(_objArray_path, alloc_obj);
5661 result_i_o ->set_req(_objArray_path, i_o());
5662 result_mem ->set_req(_objArray_path, reset_memory());
5663 }
5664 }
5665 // Otherwise, there are no barriers to worry about.
5666 // (We can dispense with card marks if we know the allocation
5667 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
5668 // causes the non-eden paths to take compensating steps to
5669 // simulate a fresh allocation, so that no further
5670 // card marks are required in compiled code to initialize
5671 // the object.)
5672
5673 if (!stopped()) {
5674 copy_to_clone(obj, alloc_obj, array_size, true);
5675
5676 // Present the results of the copy.
5677 result_reg->init_req(_array_path, control());
5678 result_val->init_req(_array_path, alloc_obj);
5679 result_i_o ->set_req(_array_path, i_o());
5680 result_mem ->set_req(_array_path, reset_memory());
5681 }
5682 }
5683 }
5684
5685 if (!stopped()) {
5686 // It's an instance (we did array above). Make the slow-path tests.
5687 // If this is a virtual call, we generate a funny guard. We grab
5688 // the vtable entry corresponding to clone() from the target object.
5689 // If the target method which we are calling happens to be the
5690 // Object clone() method, we pass the guard. We do not need this
5691 // guard for non-virtual calls; the caller is known to be the native
5692 // Object clone().
5693 if (is_virtual) {
5694 generate_virtual_guard(obj_klass, slow_region);
5695 }
5696
5697 // The object must be easily cloneable and must not have a finalizer.
5698 // Both of these conditions may be checked in a single test.
5699 // We could optimize the test further, but we don't care.
5700 generate_misc_flags_guard(obj_klass,
5701 // Test both conditions:
5702 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
5703 // Must be cloneable but not finalizer:
5704 KlassFlags::_misc_is_cloneable_fast,
5796 set_jvms(sfpt->jvms());
5797 _reexecute_sp = jvms()->sp();
5798
5799 return saved_jvms;
5800 }
5801 }
5802 }
5803 return nullptr;
5804 }
5805
5806 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5807 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5808 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5809 JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5810 uint size = alloc->req();
5811 SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5812 old_jvms->set_map(sfpt);
5813 for (uint i = 0; i < size; i++) {
5814 sfpt->init_req(i, alloc->in(i));
5815 }
5816 int adjustment = 1;
5817 const TypeAryKlassPtr* ary_klass_ptr = alloc->in(AllocateNode::KlassNode)->bottom_type()->is_aryklassptr();
5818 if (ary_klass_ptr->is_null_free()) {
5819 // A null-free, tightly coupled array allocation can only come from LibraryCallKit::inline_newNullRestrictedArray
5820 // which requires both the component type and the array length on stack for re-execution. Re-create and push
5821 // the component type.
5822 ciArrayKlass* klass = ary_klass_ptr->exact_klass()->as_array_klass();
5823 ciInstance* instance = klass->component_mirror_instance();
5824 const TypeInstPtr* t_instance = TypeInstPtr::make(instance);
5825 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), makecon(t_instance));
5826 adjustment++;
5827 }
5828 // re-push array length for deoptimization
5829 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment - 1, alloc->in(AllocateNode::ALength));
5830 old_jvms->set_sp(old_jvms->sp() + adjustment);
5831 old_jvms->set_monoff(old_jvms->monoff() + adjustment);
5832 old_jvms->set_scloff(old_jvms->scloff() + adjustment);
5833 old_jvms->set_endoff(old_jvms->endoff() + adjustment);
5834 old_jvms->set_should_reexecute(true);
5835
5836 sfpt->set_i_o(map()->i_o());
5837 sfpt->set_memory(map()->memory());
5838 sfpt->set_control(map()->control());
5839 return sfpt;
5840 }
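// The monoff/scloff/endoff adjustments above are needed because the re-pushed
// stack slot(s) (the array length, plus the component type for null-free
// arrays) grow the expression stack of the cloned JVMState, shifting every
// offset that follows the stack section.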
5841
5842 // In case of a deoptimization, we restart execution at the
5843 // allocation, allocating a new array. We would leave an uninitialized
5844 // array in the heap that GCs wouldn't expect. Move the allocation
5845 // after the traps so we don't allocate the array if we
5846 // deoptimize. This is possible because tightly_coupled_allocation()
5847 // guarantees there's no observer of the allocated array at this point
5848 // and the control flow is simple enough.
5849 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5850 int saved_reexecute_sp, uint new_idx) {
5851 if (saved_jvms_before_guards != nullptr && !stopped()) {
5852 replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5853
5854 assert(alloc != nullptr, "only with a tightly coupled allocation");
5855 // restore JVM state to the state at the arraycopy
5856 saved_jvms_before_guards->map()->set_control(map()->control());
5857 assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5858 assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5859 // If we've improved the types of some nodes (null check) while
5860 // emitting the guards, propagate them to the current state
5861 map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5862 set_jvms(saved_jvms_before_guards);
5863 _reexecute_sp = saved_reexecute_sp;
5864
5865 // Remove the allocation from above the guards
5866 CallProjections* callprojs = alloc->extract_projections(true);
5867 InitializeNode* init = alloc->initialization();
5868 Node* alloc_mem = alloc->in(TypeFunc::Memory);
5869 C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5870 C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5871
5872 // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5873 // the allocation (i.e. is only valid if the allocation succeeds):
5874 // 1) replace CastIINode with AllocateArrayNode's length here
5875 // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5876 //
5877     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate a
5878     // new, separate CastIINode (arraycopy guard checks or any array length use between the array allocation and the arraycopy).
5879 Node* init_control = init->proj_out(TypeFunc::Control);
5880 Node* alloc_length = alloc->Ideal_length();
5881 #ifdef ASSERT
5882 Node* prev_cast = nullptr;
5883 #endif
5884 for (uint i = 0; i < init_control->outcnt(); i++) {
5885 Node* init_out = init_control->raw_out(i);
5886 if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5887 #ifdef ASSERT
5888 if (prev_cast == nullptr) {
5889 prev_cast = init_out;
5891 if (prev_cast->cmp(*init_out) == false) {
5892 prev_cast->dump();
5893 init_out->dump();
5894 assert(false, "not equal CastIINode");
5895 }
5896 }
5897 #endif
5898 C->gvn_replace_by(init_out, alloc_length);
5899 }
5900 }
5901 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5902
5903 // move the allocation here (after the guards)
5904 _gvn.hash_delete(alloc);
5905 alloc->set_req(TypeFunc::Control, control());
5906 alloc->set_req(TypeFunc::I_O, i_o());
5907 Node *mem = reset_memory();
5908 set_all_memory(mem);
5909 alloc->set_req(TypeFunc::Memory, mem);
5910 set_control(init->proj_out_or_null(TypeFunc::Control));
5911 set_i_o(callprojs->fallthrough_ioproj);
5912
5913 // Update memory as done in GraphKit::set_output_for_allocation()
5914 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5915 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5916 if (ary_type->isa_aryptr() && length_type != nullptr) {
5917 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5918 }
5919 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5920 int elemidx = C->get_alias_index(telemref);
5921 set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5922 set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5923
5924 Node* allocx = _gvn.transform(alloc);
5925 assert(allocx == alloc, "where has the allocation gone?");
5926 assert(dest->is_CheckCastPP(), "not an allocation result?");
5927
5928 _gvn.hash_delete(dest);
5929 dest->set_req(0, control());
5930 Node* destx = _gvn.transform(dest);
5931 assert(destx == dest, "where has the allocation result gone?");
6229 top_src = src_type->isa_aryptr();
6230 has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
6231 src_spec = true;
6232 }
6233 if (!has_dest) {
6234 dest = maybe_cast_profiled_obj(dest, dest_k, true);
6235 dest_type = _gvn.type(dest);
6236 top_dest = dest_type->isa_aryptr();
6237 has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
6238 dest_spec = true;
6239 }
6240 }
6241 }
6242
6243 if (has_src && has_dest && can_emit_guards) {
6244 BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
6245 BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
6246 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
6247 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
6248
6249 if (src_elem == dest_elem && top_src->is_flat() == top_dest->is_flat() && src_elem == T_OBJECT) {
6250 // If both arrays are object arrays then having the exact types
6251 // for both will remove the need for a subtype check at runtime
6252 // before the call and may make it possible to pick a faster copy
6253 // routine (without a subtype check on every element)
6254 // Do we have the exact type of src?
6255 bool could_have_src = src_spec;
6256 // Do we have the exact type of dest?
6257 bool could_have_dest = dest_spec;
6258 ciKlass* src_k = nullptr;
6259 ciKlass* dest_k = nullptr;
6260 if (!src_spec) {
6261 src_k = src_type->speculative_type_not_null();
6262 if (src_k != nullptr && src_k->is_array_klass()) {
6263 could_have_src = true;
6264 }
6265 }
6266 if (!dest_spec) {
6267 dest_k = dest_type->speculative_type_not_null();
6268 if (dest_k != nullptr && dest_k->is_array_klass()) {
6269 could_have_dest = true;
6270 }
6271 }
6272 if (could_have_src && could_have_dest) {
6273 // If we can have both exact types, emit the missing guards
6274 if (could_have_src && !src_spec) {
6275 src = maybe_cast_profiled_obj(src, src_k, true);
6276 src_type = _gvn.type(src);
6277 top_src = src_type->isa_aryptr();
6278 }
6279 if (could_have_dest && !dest_spec) {
6280 dest = maybe_cast_profiled_obj(dest, dest_k, true);
6281 dest_type = _gvn.type(dest);
6282 top_dest = dest_type->isa_aryptr();
6283 }
6284 }
6285 }
6286 }
6287
6288 ciMethod* trap_method = method();
6289 int trap_bci = bci();
6290 if (saved_jvms_before_guards != nullptr) {
6291 trap_method = alloc->jvms()->method();
6292 trap_bci = alloc->jvms()->bci();
6293 }
6294
6295 bool negative_length_guard_generated = false;
6296
6297 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
6298 can_emit_guards && !src->is_top() && !dest->is_top()) {
6299     // validate arguments: enables transformation of the ArrayCopyNode
6300 validated = true;
6301
6302 RegionNode* slow_region = new RegionNode(1);
6303 record_for_igvn(slow_region);
6304
6305 // (1) src and dest are arrays.
6306 generate_non_array_guard(load_object_klass(src), slow_region);
6307 generate_non_array_guard(load_object_klass(dest), slow_region);
6308
6309 // (2) src and dest arrays must have elements of the same BasicType
6310 // done at macro expansion or at Ideal transformation time
6311
6312 // (4) src_offset must not be negative.
6313 generate_negative_guard(src_offset, slow_region);
6314
6315 // (5) dest_offset must not be negative.
6316 generate_negative_guard(dest_offset, slow_region);
6317
6318 // (7) src_offset + length must not exceed length of src.
6321 slow_region);
6322
6323 // (8) dest_offset + length must not exceed length of dest.
6324 generate_limit_guard(dest_offset, length,
6325 load_array_length(dest),
6326 slow_region);
6327
6328 // (6) length must not be negative.
6329 // This is also checked in generate_arraycopy() during macro expansion, but
6330 // we also have to check it here for the case where the ArrayCopyNode will
6331 // be eliminated by Escape Analysis.
6332 if (EliminateAllocations) {
6333 generate_negative_guard(length, slow_region);
6334 negative_length_guard_generated = true;
6335 }
6336
6337 // (9) each element of an oop array must be assignable
6338 Node* dest_klass = load_object_klass(dest);
6339 if (src != dest) {
6340 Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
6341 slow_region->add_req(not_subtype_ctrl);
6342 }
6343
6344 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
6345 const Type* toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
6346 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
6347 src_type = _gvn.type(src);
6348 top_src = src_type->isa_aryptr();
6349
6350 // Handle flat inline type arrays (null-free arrays are handled by the subtype check above)
6351 if (!stopped() && UseFlatArray) {
6352 // If dest is flat, src must be flat as well (guaranteed by src <: dest check). Handle flat src here.
6353 assert(top_dest == nullptr || !top_dest->is_flat() || top_src->is_flat(), "src array must be flat");
6354 if (top_src != nullptr && top_src->is_flat()) {
6355 // Src is flat, check that dest is flat as well
6356 if (top_dest != nullptr && !top_dest->is_flat()) {
6357 generate_fair_guard(flat_array_test(dest_klass, /* flat = */ false), slow_region);
6358 // Since dest is flat and src <: dest, dest must have the same type as src.
6359 top_dest = top_src->cast_to_exactness(false);
6360 assert(top_dest->is_flat(), "dest must be flat");
6361 dest = _gvn.transform(new CheckCastPPNode(control(), dest, top_dest));
6362 }
6363 } else if (top_src == nullptr || !top_src->is_not_flat()) {
6364 // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
6365 // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
6366 assert(top_dest == nullptr || !top_dest->is_flat(), "dest array must not be flat");
6367 generate_fair_guard(flat_array_test(src), slow_region);
6368 if (top_src != nullptr) {
6369 top_src = top_src->cast_to_not_flat();
6370 src = _gvn.transform(new CheckCastPPNode(control(), src, top_src));
6371 }
6372 }
6373 }
6374
6375 {
6376 PreserveJVMState pjvms(this);
6377 set_control(_gvn.transform(slow_region));
6378 uncommon_trap(Deoptimization::Reason_intrinsic,
6379 Deoptimization::Action_make_not_entrant);
6380 assert(stopped(), "Should be stopped");
6381 }
6382 arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
6383 }
6384
6385 if (stopped()) {
6386 return true;
6387 }
6388
6389 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6390 // Create LoadRange and LoadKlass nodes for use during macro expansion here
6391 // so the compiler has a chance to eliminate them: during macro expansion,
6392 // we have to set their control (CastPP nodes are eliminated).
6393 load_object_klass(src), load_object_klass(dest),
6394 load_array_length(src), load_array_length(dest));
6395
6396 ac->set_arraycopy(validated);
6397
6398 Node* n = _gvn.transform(ac);
6399 if (n == ac) {
6400 ac->connect_outputs(this);
6401 } else {