6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/macroAssembler.hpp"
26 #include "ci/ciUtilities.inline.hpp"
27 #include "ci/ciSymbols.hpp"
28 #include "classfile/vmIntrinsics.hpp"
29 #include "compiler/compileBroker.hpp"
30 #include "compiler/compileLog.hpp"
31 #include "gc/shared/barrierSet.hpp"
32 #include "jfr/support/jfrIntrinsics.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "oops/klass.inline.hpp"
35 #include "oops/objArrayKlass.hpp"
36 #include "opto/addnode.hpp"
37 #include "opto/arraycopynode.hpp"
38 #include "opto/c2compiler.hpp"
39 #include "opto/castnode.hpp"
40 #include "opto/cfgnode.hpp"
41 #include "opto/convertnode.hpp"
42 #include "opto/countbitsnode.hpp"
43 #include "opto/idealKit.hpp"
44 #include "opto/library_call.hpp"
45 #include "opto/mathexactnode.hpp"
298 case vmIntrinsics::_indexOfIL: return inline_string_indexOfI(StrIntrinsicNode::LL);
299 case vmIntrinsics::_indexOfIU: return inline_string_indexOfI(StrIntrinsicNode::UU);
300 case vmIntrinsics::_indexOfIUL: return inline_string_indexOfI(StrIntrinsicNode::UL);
301 case vmIntrinsics::_indexOfU_char: return inline_string_indexOfChar(StrIntrinsicNode::U);
302 case vmIntrinsics::_indexOfL_char: return inline_string_indexOfChar(StrIntrinsicNode::L);
303
304 case vmIntrinsics::_equalsL: return inline_string_equals(StrIntrinsicNode::LL);
305
306 case vmIntrinsics::_vectorizedHashCode: return inline_vectorizedHashCode();
307
308 case vmIntrinsics::_toBytesStringU: return inline_string_toBytesU();
309 case vmIntrinsics::_getCharsStringU: return inline_string_getCharsU();
310 case vmIntrinsics::_getCharStringU: return inline_string_char_access(!is_store);
311 case vmIntrinsics::_putCharStringU: return inline_string_char_access( is_store);
312
313 case vmIntrinsics::_compressStringC:
314 case vmIntrinsics::_compressStringB: return inline_string_copy( is_compress);
315 case vmIntrinsics::_inflateStringC:
316 case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);
317
318 case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false);
319 case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
320 case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false);
321 case vmIntrinsics::_getShort: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, false);
322 case vmIntrinsics::_getChar: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, false);
323 case vmIntrinsics::_getInt: return inline_unsafe_access(!is_store, T_INT, Relaxed, false);
324 case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false);
325 case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false);
326 case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);
327
328 case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false);
329 case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
330 case vmIntrinsics::_putByte: return inline_unsafe_access( is_store, T_BYTE, Relaxed, false);
331 case vmIntrinsics::_putShort: return inline_unsafe_access( is_store, T_SHORT, Relaxed, false);
332 case vmIntrinsics::_putChar: return inline_unsafe_access( is_store, T_CHAR, Relaxed, false);
333 case vmIntrinsics::_putInt: return inline_unsafe_access( is_store, T_INT, Relaxed, false);
334 case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false);
335 case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false);
336 case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false);
337
338 case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
339 case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
340 case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
341 case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
342 case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_store, T_CHAR, Volatile, false);
343 case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_store, T_INT, Volatile, false);
344 case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_store, T_LONG, Volatile, false);
345 case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_store, T_FLOAT, Volatile, false);
346 case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_store, T_DOUBLE, Volatile, false);
347
348 case vmIntrinsics::_putReferenceVolatile: return inline_unsafe_access( is_store, T_OBJECT, Volatile, false);
349 case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
350 case vmIntrinsics::_putByteVolatile: return inline_unsafe_access( is_store, T_BYTE, Volatile, false);
351 case vmIntrinsics::_putShortVolatile: return inline_unsafe_access( is_store, T_SHORT, Volatile, false);
352 case vmIntrinsics::_putCharVolatile: return inline_unsafe_access( is_store, T_CHAR, Volatile, false);
353 case vmIntrinsics::_putIntVolatile: return inline_unsafe_access( is_store, T_INT, Volatile, false);
354 case vmIntrinsics::_putLongVolatile: return inline_unsafe_access( is_store, T_LONG, Volatile, false);
355 case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access( is_store, T_FLOAT, Volatile, false);
356 case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access( is_store, T_DOUBLE, Volatile, false);
483 "notifyJvmtiEnd", false, true);
484 case vmIntrinsics::_notifyJvmtiVThreadMount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
485 "notifyJvmtiMount", false, false);
486 case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
487 "notifyJvmtiUnmount", false, false);
488 case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
489 #endif
490
491 #ifdef JFR_HAVE_INTRINSICS
492 case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
493 case vmIntrinsics::_getEventWriter: return inline_native_getEventWriter();
494 case vmIntrinsics::_jvm_commit: return inline_native_jvm_commit();
495 #endif
496 case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
497 case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
498 case vmIntrinsics::_writeback0: return inline_unsafe_writeback0();
499 case vmIntrinsics::_writebackPreSync0: return inline_unsafe_writebackSync0(true);
500 case vmIntrinsics::_writebackPostSync0: return inline_unsafe_writebackSync0(false);
501 case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
502 case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
503 case vmIntrinsics::_setMemory: return inline_unsafe_setMemory();
504 case vmIntrinsics::_getLength: return inline_native_getLength();
505 case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
506 case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
507 case vmIntrinsics::_equalsB: return inline_array_equals(StrIntrinsicNode::LL);
508 case vmIntrinsics::_equalsC: return inline_array_equals(StrIntrinsicNode::UU);
509 case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
510 case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
511 case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());
512
513 case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
514 case vmIntrinsics::_newArray: return inline_unsafe_newArray(false);
515
516 case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();
517
518 case vmIntrinsics::_isInstance:
519 case vmIntrinsics::_isHidden:
520 case vmIntrinsics::_getSuperclass:
521 case vmIntrinsics::_getClassAccessFlags: return inline_native_Class_query(intrinsic_id());
522
523 case vmIntrinsics::_floatToRawIntBits:
524 case vmIntrinsics::_floatToIntBits:
525 case vmIntrinsics::_intBitsToFloat:
526 case vmIntrinsics::_doubleToRawLongBits:
527 case vmIntrinsics::_doubleToLongBits:
528 case vmIntrinsics::_longBitsToDouble:
529 case vmIntrinsics::_floatToFloat16:
530 case vmIntrinsics::_float16ToFloat: return inline_fp_conversions(intrinsic_id());
531 case vmIntrinsics::_sqrt_float16: return inline_fp16_operations(intrinsic_id(), 1);
532 case vmIntrinsics::_fma_float16: return inline_fp16_operations(intrinsic_id(), 3);
533 case vmIntrinsics::_floatIsFinite:
534 case vmIntrinsics::_floatIsInfinite:
2266 case vmIntrinsics::_remainderUnsigned_l: {
2267 zero_check_long(argument(2));
2268 // Compile-time detection of a division-by-zero exception
2269 if (stopped()) {
2270 return true; // keep the graph constructed so far
2271 }
2272 n = new UModLNode(control(), argument(0), argument(2));
2273 break;
2274 }
2275 default: fatal_unexpected_iid(id); break;
2276 }
2277 set_result(_gvn.transform(n));
2278 return true;
2279 }
2280
2281 //----------------------------inline_unsafe_access----------------------------
2282
2283 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2284 // Attempt to infer a sharper value type from the offset and base type.
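// For example (illustrative): a getReference from an element of a String[] can be
// sharpened from the generic Object type to String; likewise, a load from an
// instance field with a declared object type can be sharpened to that declared
// type, provided the klass involved is loaded.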
2285 ciKlass* sharpened_klass = nullptr;
2286
2287 // See if it is an instance field, with an object type.
2288 if (alias_type->field() != nullptr) {
2289 if (alias_type->field()->type()->is_klass()) {
2290 sharpened_klass = alias_type->field()->type()->as_klass();
2291 }
2292 }
2293
2294 const TypeOopPtr* result = nullptr;
2295 // See if it is a narrow oop array.
2296 if (adr_type->isa_aryptr()) {
2297 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2298 const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2299 if (elem_type != nullptr && elem_type->is_loaded()) {
2300 // Sharpen the value type.
2301 result = elem_type;
2302 }
2303 }
2304 }
2305
2306 // The sharpened class might be unloaded if there is no class loader
2307 // constraint in place.
2308 if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2309 // Sharpen the value type.
2310 result = TypeOopPtr::make_from_klass(sharpened_klass);
2311 }
2312 if (result != nullptr) {
2313 #ifndef PRODUCT
2314 if (C->print_intrinsics() || C->print_inlining()) {
2315 tty->print(" from base type: "); adr_type->dump(); tty->cr();
2316 tty->print(" sharpened value: "); result->dump(); tty->cr();
2317 }
2318 #endif
2319 }
2320 return result;
2321 }
2322
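// Map an unsafe AccessKind to the C2 memory-ordering decorator used by the
// access API: Relaxed (plain get/put) -> MO_UNORDERED, Opaque -> MO_RELAXED,
// Acquire -> MO_ACQUIRE, Release -> MO_RELEASE, Volatile -> MO_SEQ_CST.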
2323 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2324 switch (kind) {
2325 case Relaxed:
2326 return MO_UNORDERED;
2327 case Opaque:
2328 return MO_RELAXED;
2329 case Acquire:
2330 return MO_ACQUIRE;
2331 case Release:
2332 return MO_RELEASE;
2333 case Volatile:
2334 return MO_SEQ_CST;
2335 default:
2336 ShouldNotReachHere();
2337 return 0;
2338 }
2339 }
2340
2341 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2342 if (callee()->is_static()) return false; // caller must have the capability!
2343 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2344 guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2345 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2346 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2347
2348 if (is_reference_type(type)) {
2349 decorators |= ON_UNKNOWN_OOP_REF;
2350 }
2351
2352 if (unaligned) {
2353 decorators |= C2_UNALIGNED;
2354 }
2355
2356 #ifndef PRODUCT
2357 {
2358 ResourceMark rm;
2359 // Check the signatures.
2360 ciSignature* sig = callee()->signature();
2361 #ifdef ASSERT
2362 if (!is_store) {
2363 // Object getReference(Object base, int/long offset), etc.
2364 BasicType rtype = sig->return_type()->basic_type();
2365 assert(rtype == type, "getter must return the expected value");
2366 assert(sig->count() == 2, "oop getter has 2 arguments");
2367 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2368 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2369 } else {
2370 // void putReference(Object base, int/long offset, Object x), etc.
2371 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2372 assert(sig->count() == 3, "oop putter has 3 arguments");
2373 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2374 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2375 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2376 assert(vtype == type, "putter must accept the expected value");
2377 }
2378 #endif // ASSERT
2379 }
2380 #endif //PRODUCT
2381
2382 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2383
2384 Node* receiver = argument(0); // type: oop
2385
2386 // Build address expression.
2387 Node* heap_base_oop = top();
2388
2389 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2390 Node* base = argument(1); // type: oop
2391 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2392 Node* offset = argument(2); // type: long
2393 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2394 // to be plain byte offsets, which are also the same as those accepted
2395 // by oopDesc::field_addr.
2396 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2397 "fieldOffset must be byte-scaled");
2398 // 32-bit machines ignore the high half!
2399 offset = ConvL2X(offset);
2400
2401 // Save state and restore on bailout
2402 uint old_sp = sp();
2403 SafePointNode* old_map = clone_map();
2404
2405 Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2406 assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2407
2408 if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2409 if (type != T_OBJECT) {
2410 decorators |= IN_NATIVE; // off-heap primitive access
2411 } else {
2412 set_map(old_map);
2413 set_sp(old_sp);
2414 return false; // off-heap oop accesses are not supported
2415 }
2416 } else {
2417 heap_base_oop = base; // on-heap or mixed access
2418 }
2419
2420 // Can base be null? If it cannot, this is always an on-heap access.
2421 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2422
2423 if (!can_access_non_heap) {
2424 decorators |= IN_HEAP;
2425 }
2426
2427 Node* val = is_store ? argument(4) : nullptr;
2428
2429 const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2430 if (adr_type == TypePtr::NULL_PTR) {
2431 set_map(old_map);
2432 set_sp(old_sp);
2433 return false; // off-heap access with zero address
2434 }
2435
2436 // Try to categorize the address.
2437 Compile::AliasType* alias_type = C->alias_type(adr_type);
2438 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2439
2440 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2441 alias_type->adr_type() == TypeAryPtr::RANGE) {
2442 set_map(old_map);
2443 set_sp(old_sp);
2444 return false; // not supported
2445 }
2446
2447 bool mismatched = false;
2448 BasicType bt = alias_type->basic_type();
2449 if (bt != T_ILLEGAL) {
2450 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2451 if (bt == T_BYTE && adr_type->isa_aryptr()) {
2452 // Alias type doesn't differentiate between byte[] and boolean[].
2453 // Use address type to get the element type.
2454 bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2455 }
2456 if (is_reference_type(bt, true)) {
2457 // accessing an array field with getReference is not a mismatch
2458 bt = T_OBJECT;
2459 }
2460 if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2461 // Don't intrinsify mismatched object accesses
2462 set_map(old_map);
2463 set_sp(old_sp);
2464 return false;
2465 }
2466 mismatched = (bt != type);
2467 } else if (alias_type->adr_type()->isa_oopptr()) {
2468 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2469 }
2470
2471 destruct_map_clone(old_map);
2472 assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2473
2474 if (mismatched) {
2475 decorators |= C2_MISMATCHED;
2476 }
2477
2478 // First guess at the value type.
2479 const Type *value_type = Type::get_const_basic_type(type);
2480
2481 // Figure out the memory ordering.
2482 decorators |= mo_decorator_for_access_kind(kind);
2483
2484 if (!is_store && type == T_OBJECT) {
2485 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2486 if (tjp != nullptr) {
2487 value_type = tjp;
2488 }
2489 }
2490
2491 receiver = null_check(receiver);
2492 if (stopped()) {
2493 return true;
2494 }
2495 // Heap pointers get a null-check from the interpreter,
2496 // as a courtesy. However, this is not guaranteed by Unsafe,
2497 // and it is not possible to fully distinguish unintended nulls
2498 // from intended ones in this API.
2499
2500 if (!is_store) {
2501 Node* p = nullptr;
2502 // Try to constant fold a load from a constant field
2503 ciField* field = alias_type->field();
2504 if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) {
2505 // final or stable field
2506 p = make_constant_from_field(field, heap_base_oop);
2507 }
2508
2509 if (p == nullptr) { // Could not constant fold the load
2510 p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2511 // Normalize the value returned by getBoolean in the following cases
2512 if (type == T_BOOLEAN &&
2513 (mismatched ||
2514 heap_base_oop == top() || // - heap_base_oop is null or
2515 (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2516 // and the unsafe access is made at a large offset
2517 // (i.e., larger than the maximum offset necessary for any
2518 // field access)
2519 ) {
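// Using IdealKit, emit the equivalent of: normalized_result = (p != 0) ? 1 : 0,
// so that such loads always produce a canonical Java boolean value.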
2520 IdealKit ideal = IdealKit(this);
2521 #define __ ideal.
2522 IdealVariable normalized_result(ideal);
2523 __ declarations_done();
2524 __ set(normalized_result, p);
2525 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2526 __ set(normalized_result, ideal.ConI(1));
2527 ideal.end_if();
2528 final_sync(ideal);
2529 p = __ value(normalized_result);
2530 #undef __
2531 }
2532 }
2533 if (type == T_ADDRESS) {
2534 p = gvn().transform(new CastP2XNode(nullptr, p));
2535 p = ConvX2UL(p);
2536 }
2537 // The load node has the control of the preceding MemBarCPUOrder. All
2538 // following nodes will have the control of the MemBarCPUOrder inserted at
2539 // the end of this method. So, pushing the load onto the stack at a later
2540 // point is fine.
2541 set_result(p);
2542 } else {
2543 if (bt == T_ADDRESS) {
2544 // Repackage the long as a pointer.
2545 val = ConvL2X(val);
2546 val = gvn().transform(new CastX2PNode(val));
2547 }
2548 access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2549 }
2550
2551 return true;
2552 }
2553
2554 //----------------------------inline_unsafe_load_store----------------------------
2555 // This method serves a couple of different customers (depending on LoadStoreKind):
2556 //
2557 // LS_cmp_swap:
2558 //
2559 // boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2560 // boolean compareAndSetInt( Object o, long offset, int expected, int x);
2561 // boolean compareAndSetLong( Object o, long offset, long expected, long x);
2562 //
2563 // LS_cmp_swap_weak:
2564 //
2565 // boolean weakCompareAndSetReference( Object o, long offset, Object expected, Object x);
2566 // boolean weakCompareAndSetReferencePlain( Object o, long offset, Object expected, Object x);
2567 // boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2568 // boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2569 //
2570 // boolean weakCompareAndSetInt( Object o, long offset, int expected, int x);
2739 }
2740 case LS_cmp_swap:
2741 case LS_cmp_swap_weak:
2742 case LS_get_add:
2743 break;
2744 default:
2745 ShouldNotReachHere();
2746 }
2747
2748 // Null check receiver.
2749 receiver = null_check(receiver);
2750 if (stopped()) {
2751 return true;
2752 }
2753
2754 int alias_idx = C->get_alias_index(adr_type);
2755
2756 if (is_reference_type(type)) {
2757 decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2758
2759 // Transformation of a value which could be a null pointer (CastPP #null)
2760 // could be delayed during Parse (for example, in adjust_map_after_if()).
2761 // Execute the transformation here to avoid barrier generation in such a case.
2762 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2763 newval = _gvn.makecon(TypePtr::NULL_PTR);
2764
2765 if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2766 // Refine the value to a null constant, when it is known to be null
2767 oldval = _gvn.makecon(TypePtr::NULL_PTR);
2768 }
2769 }
2770
2771 Node* result = nullptr;
2772 switch (kind) {
2773 case LS_cmp_exchange: {
2774 result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2775 oldval, newval, value_type, type, decorators);
2776 break;
2777 }
2778 case LS_cmp_swap_weak:
2925 Deoptimization::Action_make_not_entrant);
2926 }
2927 if (stopped()) {
2928 return true;
2929 }
2930 #endif //INCLUDE_JVMTI
2931
2932 Node* test = nullptr;
2933 if (LibraryCallKit::klass_needs_init_guard(kls)) {
2934 // Note: The argument might still be an illegal value like
2935 // Serializable.class or Object[].class. The runtime will handle it.
2936 // But we must make an explicit check for initialization.
2937 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2938 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2939 // can generate code to load it as unsigned byte.
2940 Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
2941 Node* bits = intcon(InstanceKlass::fully_initialized);
2942 test = _gvn.transform(new SubINode(inst, bits));
2943 // The 'test' is non-zero if we need to take a slow path.
2944 }
2945
2946 Node* obj = new_instance(kls, test);
2947 set_result(obj);
2948 return true;
2949 }
2950
2951 //------------------------inline_native_time_funcs--------------
2952 // inline code for System.currentTimeMillis() and System.nanoTime()
2953 // these have the same type and signature
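// (JFR's _counterTime intrinsic is routed through this helper as well; see the
// intrinsic dispatch earlier in this file.)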
2954 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2955 const TypeFunc* tf = OptoRuntime::void_long_Type();
2956 const TypePtr* no_memory_effects = nullptr;
2957 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2958 Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2959 #ifdef ASSERT
2960 Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2961 assert(value_top == top(), "second value must be top");
2962 #endif
2963 set_result(value);
2964 return true;
2965 }
2966
3707 Node* thread = _gvn.transform(new ThreadLocalNode());
3708 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3709 Node* thread_obj_handle
3710 = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3711 thread_obj_handle = _gvn.transform(thread_obj_handle);
3712 const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3713 access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3714
3715 // Change the _monitor_owner_id of the JavaThread
3716 Node* tid = load_field_from_object(arr, "tid", "J");
3717 Node* monitor_owner_id_offset = basic_plus_adr(thread, in_bytes(JavaThread::monitor_owner_id_offset()));
3718 store_to_memory(control(), monitor_owner_id_offset, tid, T_LONG, MemNode::unordered, true);
3719
3720 JFR_ONLY(extend_setCurrentThread(thread, arr);)
3721 return true;
3722 }
3723
3724 const Type* LibraryCallKit::scopedValueCache_type() {
3725 ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3726 const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3727 const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3728
3729 // Because we create the scopedValue cache lazily we have to make the
3730 // type of the result BotPTR.
3731 bool xk = etype->klass_is_exact();
3732 const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3733 return objects_type;
3734 }
3735
3736 Node* LibraryCallKit::scopedValueCache_helper() {
3737 Node* thread = _gvn.transform(new ThreadLocalNode());
3738 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3739 // We cannot use immutable_memory() because we might flip onto a
3740 // different carrier thread, at which point we'll need to use that
3741 // carrier thread's cache.
3742 // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3743 // TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3744 return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3745 }
3746
3747 //------------------------inline_native_scopedValueCache------------------
3748 bool LibraryCallKit::inline_native_scopedValueCache() {
3749 Node* cache_obj_handle = scopedValueCache_helper();
3750 const Type* objects_type = scopedValueCache_type();
3751 set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3752
3836 store_to_memory(control(), pin_count_offset, next_pin_count, T_INT, MemNode::unordered);
3837
3838 // Result of top level CFG and Memory.
3839 RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
3840 record_for_igvn(result_rgn);
3841 PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
3842 record_for_igvn(result_mem);
3843
3844 result_rgn->init_req(_true_path, _gvn.transform(valid_pin_count));
3845 result_rgn->init_req(_false_path, _gvn.transform(continuation_is_null));
3846 result_mem->init_req(_true_path, _gvn.transform(reset_memory()));
3847 result_mem->init_req(_false_path, _gvn.transform(input_memory_state));
3848
3849 // Set output state.
3850 set_control(_gvn.transform(result_rgn));
3851 set_all_memory(_gvn.transform(result_mem));
3852
3853 return true;
3854 }
3855
3856 //---------------------------load_mirror_from_klass----------------------------
3857 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3858 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3859 Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3860 Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3861 // mirror = ((OopHandle)mirror)->resolve();
3862 return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3863 }
3864
3865 //-----------------------load_klass_from_mirror_common-------------------------
3866 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3867 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3868 // and branch to the given path on the region.
3869 // If never_see_null, take an uncommon trap on null, so we can optimistically
3870 // compile for the non-null case.
3871 // If the region is null, force never_see_null = true.
3872 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3873 bool never_see_null,
3874 RegionNode* region,
3875 int null_path,
3876 int offset) {
3877 if (region == nullptr) never_see_null = true;
3878 Node* p = basic_plus_adr(mirror, offset);
3879 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3880 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3881 Node* null_ctl = top();
3882 kls = null_check_oop(kls, &null_ctl, never_see_null);
3883 if (region != nullptr) {
3884 // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
3888 }
3889 return kls;
3890 }
3891
3892 //--------------------(inline_native_Class_query helpers)---------------------
3893 // Use this for JVM_ACC_INTERFACE.
3894 // Fall through if (mods & mask) == bits, take the guard otherwise.
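// For example, generate_interface_guard below passes mask=JVM_ACC_INTERFACE and
// bits=0, so ordinary classes fall through and interface klasses take the guard
// (branch to the region).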
3895 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
3896 ByteSize offset, const Type* type, BasicType bt) {
3897 // Branch around if the given klass has the given modifier bit set.
3898 // Like generate_guard, adds a new path onto the region.
3899 Node* modp = basic_plus_adr(kls, in_bytes(offset));
3900 Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
3901 Node* mask = intcon(modifier_mask);
3902 Node* bits = intcon(modifier_bits);
3903 Node* mbit = _gvn.transform(new AndINode(mods, mask));
3904 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
3905 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3906 return generate_fair_guard(bol, region);
3907 }
3908 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3909 return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
3910 Klass::access_flags_offset(), TypeInt::CHAR, T_CHAR);
3911 }
3912
3913 // Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
3914 Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3915 return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
3916 Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
3917 }
3918
3919 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3920 return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
3921 }
3922
3923 //-------------------------inline_native_Class_query-------------------
3924 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3925 const Type* return_type = TypeInt::BOOL;
3926 Node* prim_return_value = top(); // what happens if it's a primitive class?
3927 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4046
4047 case vmIntrinsics::_getClassAccessFlags:
4048 p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
4049 query_value = make_load(nullptr, p, TypeInt::CHAR, T_CHAR, MemNode::unordered);
4050 break;
4051
4052 default:
4053 fatal_unexpected_iid(id);
4054 break;
4055 }
4056
4057 // Fall-through is the normal case of a query to a real class.
4058 phi->init_req(1, query_value);
4059 region->init_req(1, control());
4060
4061 C->set_has_split_ifs(true); // Has chance for split-if optimization
4062 set_result(region, phi);
4063 return true;
4064 }
4065
4066 //-------------------------inline_Class_cast-------------------
4067 bool LibraryCallKit::inline_Class_cast() {
4068 Node* mirror = argument(0); // Class
4069 Node* obj = argument(1);
4070 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4071 if (mirror_con == nullptr) {
4072 return false; // dead path (mirror->is_top()).
4073 }
4074 if (obj == nullptr || obj->is_top()) {
4075 return false; // dead path
4076 }
4077 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4078
4079 // First, see if Class.cast() can be folded statically.
4080 // java_mirror_type() returns non-null for compile-time Class constants.
4081 ciType* tm = mirror_con->java_mirror_type();
4082 if (tm != nullptr && tm->is_klass() &&
4083 tp != nullptr) {
4084 if (!tp->is_loaded()) {
4085 // Don't use intrinsic when class is not loaded.
4086 return false;
4087 } else {
4088 int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());
4089 if (static_res == Compile::SSC_always_true) {
4090 // isInstance() is true - fold the code.
4091 set_result(obj);
4092 return true;
4093 } else if (static_res == Compile::SSC_always_false) {
4094 // Don't use intrinsic, have to throw ClassCastException.
4095 // If the reference is null, the non-intrinsic bytecode will
4096 // be optimized appropriately.
4097 return false;
4098 }
4099 }
4100 }
4101
4102 // Bail out of the intrinsic and do normal inlining if the exception path is frequent.
4103 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4104 return false;
4105 }
4106
4107 // Generate dynamic checks.
4108 // Class.cast() is the Java implementation of the _checkcast bytecode.
4109 // Do checkcast (Parse::do_checkcast()) optimizations here.
4110
4111 mirror = null_check(mirror);
4112 // If mirror is dead, only null-path is taken.
4113 if (stopped()) {
4114 return true;
4115 }
4116
4117 // Not-subtype or the mirror's klass ptr is null (in case it is a primitive).
4118 enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
4119 RegionNode* region = new RegionNode(PATH_LIMIT);
4120 record_for_igvn(region);
4121
4122 // Now load the mirror's klass metaobject, and null-check it.
4123 // If kls is null, we have a primitive mirror and
4124 // nothing is an instance of a primitive type.
4125 Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4126
4127 Node* res = top();
4128 if (!stopped()) {
4129 Node* bad_type_ctrl = top();
4130 // Do checkcast optimizations.
4131 res = gen_checkcast(obj, kls, &bad_type_ctrl);
4132 region->init_req(_bad_type_path, bad_type_ctrl);
4133 }
4134 if (region->in(_prim_path) != top() ||
4135 region->in(_bad_type_path) != top()) {
4136 // Let Interpreter throw ClassCastException.
4137 PreserveJVMState pjvms(this);
4138 set_control(_gvn.transform(region));
4139 uncommon_trap(Deoptimization::Reason_intrinsic,
4140 Deoptimization::Action_maybe_recompile);
4141 }
4142 if (!stopped()) {
4143 set_result(res);
4144 }
4145 return true;
4146 }
4147
4148
4149 //--------------------------inline_native_subtype_check------------------------
4150 // This intrinsic takes the JNI calls out of the heart of
4151 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4152 bool LibraryCallKit::inline_native_subtype_check() {
4153 // Pull both arguments off the stack.
4154 Node* args[2]; // two java.lang.Class mirrors: superc, subc
4155 args[0] = argument(0);
4156 args[1] = argument(1);
4157 Node* klasses[2]; // corresponding Klasses: superk, subk
4158 klasses[0] = klasses[1] = top();
4159
4160 enum {
4161 // A full decision tree on {superc is prim, subc is prim}:
4162 _prim_0_path = 1, // {P,N} => false
4163 // {P,P} & superc!=subc => false
4164 _prim_same_path, // {P,P} & superc==subc => true
4165 _prim_1_path, // {N,P} => false
4166 _ref_subtype_path, // {N,N} & subtype check wins => true
4167 _both_ref_path, // {N,N} & subtype check loses => false
4168 PATH_LIMIT
4169 };
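// Here klasses[0]/superc comes from the receiver of isAssignableFrom and
// klasses[1]/subc from its argument; a null klass below means the corresponding
// mirror denotes a primitive type.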
4170
4171 RegionNode* region = new RegionNode(PATH_LIMIT);
4172 Node* phi = new PhiNode(region, TypeInt::BOOL);
4173 record_for_igvn(region);
4174
4175 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
4176 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4177 int class_klass_offset = java_lang_Class::klass_offset();
4178
4179 // First null-check both mirrors and load each mirror's klass metaobject.
4180 int which_arg;
4181 for (which_arg = 0; which_arg <= 1; which_arg++) {
4182 Node* arg = args[which_arg];
4183 arg = null_check(arg);
4184 if (stopped()) break;
4185 args[which_arg] = arg;
4186
4187 Node* p = basic_plus_adr(arg, class_klass_offset);
4188 Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
4189 klasses[which_arg] = _gvn.transform(kls);
4190 }
4191
4192 // Having loaded both klasses, test each for null.
4193 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4194 for (which_arg = 0; which_arg <= 1; which_arg++) {
4195 Node* kls = klasses[which_arg];
4196 Node* null_ctl = top();
4197 kls = null_check_oop(kls, &null_ctl, never_see_null);
4198 int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
4199 region->init_req(prim_path, null_ctl);
4200 if (stopped()) break;
4201 klasses[which_arg] = kls;
4202 }
4203
4204 if (!stopped()) {
4205 // now we have two reference types, in klasses[0..1]
4206 Node* subk = klasses[1]; // the argument to isAssignableFrom
4207 Node* superk = klasses[0]; // the receiver
4208 region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4209 // now we have a successful reference subtype check
4210 region->set_req(_ref_subtype_path, control());
4211 }
4212
4213 // If both operands are primitive (both klasses null), then
4214 // we must return true when they are identical primitives.
4215 // It is convenient to test this after the first null klass check.
4216 set_control(region->in(_prim_0_path)); // go back to first null check
4217 if (!stopped()) {
4218 // Since superc is primitive, make a guard for the superc==subc case.
4219 Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4220 Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4221 generate_guard(bol_eq, region, PROB_FAIR);
4222 if (region->req() == PATH_LIMIT+1) {
4223 // A guard was added. If the added guard is taken, superc==subc.
4224 region->swap_edges(PATH_LIMIT, _prim_same_path);
4225 region->del_req(PATH_LIMIT);
4226 }
4227 region->set_req(_prim_0_path, control()); // Not equal after all.
4228 }
4229
4230 // these are the only paths that produce 'true':
4231 phi->set_req(_prim_same_path, intcon(1));
4232 phi->set_req(_ref_subtype_path, intcon(1));
4233
4234 // pull together the cases:
4235 assert(region->req() == PATH_LIMIT, "sane region");
4236 for (uint i = 1; i < region->req(); i++) {
4237 Node* ctl = region->in(i);
4238 if (ctl == nullptr || ctl == top()) {
4239 region->set_req(i, top());
4240 phi ->set_req(i, top());
4241 } else if (phi->in(i) == nullptr) {
4242 phi->set_req(i, intcon(0)); // all other paths produce 'false'
4243 }
4244 }
4245
4246 set_control(_gvn.transform(region));
4247 set_result(_gvn.transform(phi));
4248 return true;
4249 }
4250
4251 //---------------------generate_array_guard_common------------------------
4252 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
4253 bool obj_array, bool not_array, Node** obj) {
4254
4255 if (stopped()) {
4256 return nullptr;
4257 }
4258
4259 // If obj_array/not_array==false/false:
4260 // Branch around if the given klass is in fact an array (either obj or prim).
4261 // If obj_array/not_array==false/true:
4262 // Branch around if the given klass is not an array klass of any kind.
4263 // If obj_array/not_array==true/true:
4264 // Branch around if the kls is not an oop array (kls is int[], String, etc.)
4265 // If obj_array/not_array==true/false:
4266 // Branch around if the kls is an oop array (Object[] or subtype)
4267 //
4268 // Like generate_guard, adds a new path onto the region.
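// (Callers elsewhere in this file use the generate_array_guard,
// generate_objArray_guard, and generate_non_objArray_guard entry points to
// select these flag combinations.)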
4269 jint layout_con = 0;
4270 Node* layout_val = get_layout_helper(kls, layout_con);
4271 if (layout_val == nullptr) {
4272 bool query = (obj_array
4273 ? Klass::layout_helper_is_objArray(layout_con)
4274 : Klass::layout_helper_is_array(layout_con));
4275 if (query == not_array) {
4276 return nullptr; // never a branch
4277 } else { // always a branch
4278 Node* always_branch = control();
4279 if (region != nullptr)
4280 region->add_req(always_branch);
4281 set_control(top());
4282 return always_branch;
4283 }
4284 }
4285 // Now test the correct condition.
4286 jint nval = (obj_array
4287 ? (jint)(Klass::_lh_array_tag_type_value
4288 << Klass::_lh_array_tag_shift)
4289 : Klass::_lh_neutral_value);
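// Array layout helpers are negative while instance layout helpers are
// non-negative, and object-array tags occupy the most negative range, so a
// signed less-than against the chosen constant selects exactly the [obj]arrays
// (see the layout helper encoding in klass.hpp).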
4290 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4291 BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array
4292 // invert the test if we are looking for a non-array
4293 if (not_array) btest = BoolTest(btest).negate();
4294 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4295 Node* ctrl = generate_fair_guard(bol, region);
4296 Node* is_array_ctrl = not_array ? control() : ctrl;
4297 if (obj != nullptr && is_array_ctrl != nullptr && is_array_ctrl != top()) {
4298 // Keep track of the fact that 'obj' is an array to prevent
4299 // array specific accesses from floating above the guard.
4300 Node* cast = _gvn.transform(new CastPPNode(is_array_ctrl, *obj, TypeAryPtr::BOTTOM));
4301 // Check for top because in rare cases, the type system can determine that
4302 // the object can't be an array but the layout helper check is not folded.
4303 if (!cast->is_top()) {
4304 *obj = cast;
4305 }
4306 }
4307 return ctrl;
4308 }
4309
4310
4311 //-----------------------inline_native_newArray--------------------------
4312 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4313 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4314 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4315 Node* mirror;
4316 Node* count_val;
4317 if (uninitialized) {
4318 null_check_receiver();
4319 mirror = argument(1);
4320 count_val = argument(2);
4321 } else {
4322 mirror = argument(0);
4323 count_val = argument(1);
4324 }
4325
4326 mirror = null_check(mirror);
4327 // If mirror or obj is dead, only null-path is taken.
4328 if (stopped()) return true;
4329
4330 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4331 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4332 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4438 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4439 { PreserveReexecuteState preexecs(this);
4440 jvms()->set_should_reexecute(true);
4441
4442 array_type_mirror = null_check(array_type_mirror);
4443 original = null_check(original);
4444
4445 // Check if a null path was taken unconditionally.
4446 if (stopped()) return true;
4447
4448 Node* orig_length = load_array_length(original);
4449
4450 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4451 klass_node = null_check(klass_node);
4452
4453 RegionNode* bailout = new RegionNode(1);
4454 record_for_igvn(bailout);
4455
4456 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4457 // Bail out if that is so.
4458 Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
4459 if (not_objArray != nullptr) {
4460 // Improve the klass node's type from the new optimistic assumption:
4461 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4462 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
4463 Node* cast = new CastPPNode(control(), klass_node, akls);
4464 klass_node = _gvn.transform(cast);
4465 }
4466
4467 // Bail out if either start or end is negative.
4468 generate_negative_guard(start, bailout, &start);
4469 generate_negative_guard(end, bailout, &end);
4470
4471 Node* length = end;
4472 if (_gvn.type(start) != TypeInt::ZERO) {
4473 length = _gvn.transform(new SubINode(end, start));
4474 }
4475
4476 // Bail out if length is negative (i.e., if start > end).
4477 // Without this, new_array would throw a
4478 // NegativeArraySizeException, but IllegalArgumentException is what
4479 // should be thrown.
4480 generate_negative_guard(length, bailout, &length);
4481
4482 // Bail out if start is larger than the original length
4483 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4484 generate_negative_guard(orig_tail, bailout, &orig_tail);
4485
4486 if (bailout->req() > 1) {
4487 PreserveJVMState pjvms(this);
4488 set_control(_gvn.transform(bailout));
4489 uncommon_trap(Deoptimization::Reason_intrinsic,
4490 Deoptimization::Action_maybe_recompile);
4491 }
4492
4493 if (!stopped()) {
4494 // How many elements will we copy from the original?
4495 // The answer is MinI(orig_tail, length).
4496 Node* moved = _gvn.transform(new MinINode(orig_tail, length));
4497
4498 // Generate a direct call to the right arraycopy function(s).
4499 // We know the copy is disjoint but we might not know if the
4500 // oop stores need checking.
4501 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
4507 // to the copyOf to be validated, including that the copy to the
4508 // new array won't trigger an ArrayStoreException. That subtype
4509 // check can be optimized if we know something on the type of
4510 // the input array from type speculation.
4511 if (_gvn.type(klass_node)->singleton()) {
4512 const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4513 const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4514
4515 int test = C->static_subtype_check(superk, subk);
4516 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4517 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4518 if (t_original->speculative_type() != nullptr) {
4519 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4520 }
4521 }
4522 }
4523
4524 bool validated = false;
4525 // Reason_class_check rather than Reason_intrinsic because we
4526 // want to intrinsify even if this traps.
4527 if (!too_many_traps(Deoptimization::Reason_class_check)) {
4528 Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4529
4530 if (not_subtype_ctrl != top()) {
4531 PreserveJVMState pjvms(this);
4532 set_control(not_subtype_ctrl);
4533 uncommon_trap(Deoptimization::Reason_class_check,
4534 Deoptimization::Action_make_not_entrant);
4535 assert(stopped(), "Should be stopped");
4536 }
4537 validated = true;
4538 }
4539
4540 if (!stopped()) {
4541 newcopy = new_array(klass_node, length, 0); // no arguments to push
4542
4543 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
4544 load_object_klass(original), klass_node);
4545 if (!is_copyOfRange) {
4546 ac->set_copyof(validated);
4547 } else {
4593
4594 //-----------------------generate_method_call----------------------------
4595 // Use generate_method_call to make a slow-call to the real
4596 // method if the fast path fails. An alternative would be to
4597 // use a stub like OptoRuntime::slow_arraycopy_Java.
4598 // This only works for expanding the current library call,
4599 // not another intrinsic. (E.g., don't use this for making an
4600 // arraycopy call inside of the copyOf intrinsic.)
4601 CallJavaNode*
4602 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4603 // When compiling the intrinsic method itself, do not use this technique.
4604 guarantee(callee() != C->method(), "cannot make slow-call to self");
4605
4606 ciMethod* method = callee();
4607 // ensure the JVMS we have will be correct for this call
4608 guarantee(method_id == method->intrinsic_id(), "must match");
4609
4610 const TypeFunc* tf = TypeFunc::make(method);
4611 if (res_not_null) {
4612 assert(tf->return_type() == T_OBJECT, "");
4613 const TypeTuple* range = tf->range();
4614 const Type** fields = TypeTuple::fields(range->cnt());
4615 fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4616 const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4617 tf = TypeFunc::make(tf->domain(), new_range);
4618 }
4619 CallJavaNode* slow_call;
4620 if (is_static) {
4621 assert(!is_virtual, "");
4622 slow_call = new CallStaticJavaNode(C, tf,
4623 SharedRuntime::get_resolve_static_call_stub(), method);
4624 } else if (is_virtual) {
4625 assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4626 int vtable_index = Method::invalid_vtable_index;
4627 if (UseInlineCaches) {
4628 // Suppress the vtable call
4629 } else {
4630 // hashCode and clone are not miranda methods,
4631 // so the vtable index is fixed.
4632 // No need to use the linkResolver to get it.
4633 vtable_index = method->vtable_index();
4634 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4635 "bad index %d", vtable_index);
4636 }
4637 slow_call = new CallDynamicJavaNode(tf,
4654 set_edges_for_java_call(slow_call);
4655 return slow_call;
4656 }
4657
4658
4659 /**
4660 * Build special case code for calls to hashCode on an object. This call may
4661 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4662 * slightly different code.
4663 */
4664 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4665 assert(is_static == callee()->is_static(), "correct intrinsic selection");
4666 assert(!(is_virtual && is_static), "either virtual, special, or static");
4667
4668 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4669
4670 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4671 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
4672 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4673 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4674 Node* obj = nullptr;
4675 if (!is_static) {
4676 // Check for hashing null object
4677 obj = null_check_receiver();
4678 if (stopped()) return true; // unconditionally null
4679 result_reg->init_req(_null_path, top());
4680 result_val->init_req(_null_path, top());
4681 } else {
4682 // Do a null check, and return zero if null.
4683 // System.identityHashCode(null) == 0
4684 obj = argument(0);
4685 Node* null_ctl = top();
4686 obj = null_check_oop(obj, &null_ctl);
4687 result_reg->init_req(_null_path, null_ctl);
4688 result_val->init_req(_null_path, _gvn.intcon(0));
4689 }
4690
4691 // Unconditionally null? Then return right away.
4692 if (stopped()) {
4693 set_control( result_reg->in(_null_path));
4694 if (!stopped())
4695 set_result(result_val->in(_null_path));
4696 return true;
4697 }
4698
4699 // We only go to the fast case code if we pass a number of guards. The
4700 // paths which do not pass are accumulated in the slow_region.
4701 RegionNode* slow_region = new RegionNode(1);
4702 record_for_igvn(slow_region);
4703
4704 // If this is a virtual call, we generate a funny guard. We pull out
4705 // the vtable entry corresponding to hashCode() from the target object.
4706 // If the target method which we are calling happens to be the native
4707 // Object hashCode() method, we pass the guard. We do not need this
4708 // guard for non-virtual calls -- the caller is known to be the native
4709 // Object hashCode().
4710 if (is_virtual) {
4711 // After null check, get the object's klass.
4712 Node* obj_klass = load_object_klass(obj);
4713 generate_virtual_guard(obj_klass, slow_region);
4714 }
4715
4716 // Get the header out of the object, use LoadMarkNode when available
4717 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4718 // The control of the load must be null. Otherwise, the load can move before
4719 // the null check after castPP removal.
4720 Node* no_ctrl = nullptr;
4721 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4722
4723 if (!UseObjectMonitorTable) {
4724 // Test the header to see if it is safe to read w.r.t. locking.
4725 Node *lock_mask = _gvn.MakeConX(markWord::lock_mask_in_place);
4726 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4727 if (LockingMode == LM_LIGHTWEIGHT) {
4728 Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
4729 Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4730 Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4731
4732 generate_slow_guard(test_monitor, slow_region);
4733 } else {
4734 Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
4735 Node *chk_unlocked = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
4736 Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
4737
4738 generate_slow_guard(test_not_unlocked, slow_region);
4739 }
4740 }
4741
4742 // Get the hash value and check to see that it has been properly assigned.
4743 // We depend on hash_mask being at most 32 bits and avoid the use of
4744 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4745 // vm: see markWord.hpp.
4780 // this->control() comes from set_results_for_java_call
4781 result_reg->init_req(_slow_path, control());
4782 result_val->init_req(_slow_path, slow_result);
4783 result_io ->set_req(_slow_path, i_o());
4784 result_mem ->set_req(_slow_path, reset_memory());
4785 }
4786
4787 // Return the combined state.
4788 set_i_o( _gvn.transform(result_io) );
4789 set_all_memory( _gvn.transform(result_mem));
4790
4791 set_result(result_reg, result_val);
4792 return true;
4793 }
4794
4795 //---------------------------inline_native_getClass----------------------------
4796 // public final native Class<?> java.lang.Object.getClass();
4797 //
4798 // Build special case code for calls to getClass on an object.
4799 bool LibraryCallKit::inline_native_getClass() {
4800 Node* obj = null_check_receiver();
4801 if (stopped()) return true;
4802 set_result(load_mirror_from_klass(load_object_klass(obj)));
4803 return true;
4804 }
4805
4806 //-----------------inline_native_Reflection_getCallerClass---------------------
4807 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4808 //
4809 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4810 //
4811 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4812 // in that it must skip particular security frames and checks for
4813 // caller sensitive methods.
4814 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4815 #ifndef PRODUCT
4816 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4817 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4818 }
4819 #endif
4820
5132 dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5133
5134 flags |= RC_NARROW_MEM; // narrow in memory
5135 }
5136
5137 // Call it. Note that the length argument is not scaled.
5138 make_runtime_call(flags,
5139 OptoRuntime::unsafe_setmemory_Type(),
5140 StubRoutines::unsafe_setmemory(),
5141 "unsafe_setmemory",
5142 dst_type,
5143 dst_addr, size XTOP, byte);
5144
5145 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, MemNode::unordered);
5146
5147 return true;
5148 }
5149
5150 #undef XTOP
5151
5152 //------------------------copy_to_clone---------------------------------
5153 // Helper function for inline_native_clone.
5154 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5155 assert(obj_size != nullptr, "");
5156 Node* raw_obj = alloc_obj->in(1);
5157 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5158
5159 AllocateNode* alloc = nullptr;
5160 if (ReduceBulkZeroing &&
5161 // If we are implementing an array clone without knowing its source type
5162 // (can happen when compiling the array-guarded branch of a reflective
5163 // Object.clone() invocation), initialize the array within the allocation.
5164 // This is needed because some GCs (e.g. ZGC) might fall back in this case
5165 // to a runtime clone call that assumes fully initialized source arrays.
5166 (!is_array || obj->get_ptr_type()->isa_aryptr() != nullptr)) {
5167 // We will be completely responsible for initializing this object -
5168 // mark Initialize node as complete.
5169 alloc = AllocateNode::Ideal_allocation(alloc_obj);
5170 // The object was just allocated - there should not be any stores yet!
5171 guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
5202 // not cloneable or finalizer => slow path to out-of-line Object.clone
5203 //
5204 // The general case has two steps, allocation and copying.
5205 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5206 //
5207 // Copying also has two cases, oop arrays and everything else.
5208 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5209 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5210 //
5211 // These steps fold up nicely if and when the cloned object's klass
5212 // can be sharply typed as an object array, a type array, or an instance.
5213 //
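// Illustrative example (a sketch, not part of the original comment): if
// profiling shows the receiver of clone() is always, say, an int[], the array
// guard below folds, the object-array and instance paths disappear, and only
// _array_path (plain array allocation plus arrayof_long_arraycopy) survives.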
5214 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5215 PhiNode* result_val;
5216
5217 // Set the reexecute bit for the interpreter to reexecute
5218 // the bytecode that invokes Object.clone if deoptimization happens.
5219 { PreserveReexecuteState preexecs(this);
5220 jvms()->set_should_reexecute(true);
5221
5222 Node* obj = null_check_receiver();
5223 if (stopped()) return true;
5224
5225 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5226
5227 // If we are going to clone an instance, we need its exact type to
5228 // know the number and types of fields to convert the clone to
5229 // loads/stores. Maybe a speculative type can help us.
5230 if (!obj_type->klass_is_exact() &&
5231 obj_type->speculative_type() != nullptr &&
5232 obj_type->speculative_type()->is_instance_klass()) {
5233 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5234 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5235 !spec_ik->has_injected_fields()) {
5236 if (!obj_type->isa_instptr() ||
5237 obj_type->is_instptr()->instance_klass()->has_subklass()) {
5238 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5239 }
5240 }
5241 }
5242
5243 // Conservatively insert a memory barrier on all memory slices.
5244 // Do not let writes into the original float below the clone.
5245 insert_mem_bar(Op_MemBarCPUOrder);
5246
5247 // paths into result_reg:
5248 enum {
5249 _slow_path = 1, // out-of-line call to clone method (virtual or not)
5250 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
5251 _array_path, // plain array allocation, plus arrayof_long_arraycopy
5252 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
5253 PATH_LIMIT
5254 };
5255 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5256 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5257 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
5258 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5259 record_for_igvn(result_reg);
5260
5261 Node* obj_klass = load_object_klass(obj);
5262 Node* array_obj = obj;
5263 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr, &array_obj);
5264 if (array_ctl != nullptr) {
5265 // It's an array.
5266 PreserveJVMState pjvms(this);
5267 set_control(array_ctl);
5268 Node* obj_length = load_array_length(array_obj);
5269 Node* array_size = nullptr; // Size of the array without object alignment padding.
5270 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5271
5272 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5273 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5274 // If it is an oop array, it requires very special treatment,
5275 // because gc barriers are required when accessing the array.
5276 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5277 if (is_obja != nullptr) {
5278 PreserveJVMState pjvms2(this);
5279 set_control(is_obja);
5280 // Generate a direct call to the right arraycopy function(s).
5281 // Clones are always tightly coupled.
5282 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, array_obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5283 ac->set_clone_oop_array();
5284 Node* n = _gvn.transform(ac);
5285 assert(n == ac, "cannot disappear");
5286 ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5287
5288 result_reg->init_req(_objArray_path, control());
5289 result_val->init_req(_objArray_path, alloc_obj);
5290 result_i_o ->set_req(_objArray_path, i_o());
5291 result_mem ->set_req(_objArray_path, reset_memory());
5292 }
5293 }
5294 // Otherwise, there are no barriers to worry about.
5295 // (We can dispense with card marks if we know the allocation
5296 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
5297 // causes the non-eden paths to take compensating steps to
5298 // simulate a fresh allocation, so that no further
5299 // card marks are required in compiled code to initialize
5300 // the object.)
5301
5302 if (!stopped()) {
5303 copy_to_clone(array_obj, alloc_obj, array_size, true);
5304
5305 // Present the results of the copy.
5306 result_reg->init_req(_array_path, control());
5307 result_val->init_req(_array_path, alloc_obj);
5308 result_i_o ->set_req(_array_path, i_o());
5309 result_mem ->set_req(_array_path, reset_memory());
5310 }
5311 }
5312
5313 // We only go to the instance fast case code if we pass a number of guards.
5314 // The paths which do not pass are accumulated in the slow_region.
5315 RegionNode* slow_region = new RegionNode(1);
5316 record_for_igvn(slow_region);
5317 if (!stopped()) {
5318 // It's an instance (we did array above). Make the slow-path tests.
5319 // If this is a virtual call, we generate a funny guard. We grab
5320 // the vtable entry corresponding to clone() from the target object.
5321 // If the target method which we are calling happens to be the
5322 // Object clone() method, we pass the guard. We do not need this
5323 // guard for non-virtual calls; the caller is known to be the native
5324 // Object clone().
5325 if (is_virtual) {
5326 generate_virtual_guard(obj_klass, slow_region);
5327 }
5328
5329 // The object must be easily cloneable and must not have a finalizer.
5330 // Both of these conditions may be checked in a single test.
5331 // We could optimize the test further, but we don't care.
5332 generate_misc_flags_guard(obj_klass,
5333 // Test both conditions:
5334 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
5335 // Must be cloneable but not finalizer:
5336 KlassFlags::_misc_is_cloneable_fast,
5428 set_jvms(sfpt->jvms());
5429 _reexecute_sp = jvms()->sp();
5430
5431 return saved_jvms;
5432 }
5433 }
5434 }
5435 return nullptr;
5436 }
5437
5438 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5439 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
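// Sketch of the adjustment performed below (assumed layout): the array length,
// which the allocation itself consumed, is pushed back onto the expression stack
// of the cloned state, so sp(), monoff(), scloff() and endoff() each move up by
// one slot to keep the interpreter frame layout consistent on re-execution.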
5440 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5441 JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5442 uint size = alloc->req();
5443 SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5444 old_jvms->set_map(sfpt);
5445 for (uint i = 0; i < size; i++) {
5446 sfpt->init_req(i, alloc->in(i));
5447 }
5448 // re-push array length for deoptimization
5449 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
5450 old_jvms->set_sp(old_jvms->sp()+1);
5451 old_jvms->set_monoff(old_jvms->monoff()+1);
5452 old_jvms->set_scloff(old_jvms->scloff()+1);
5453 old_jvms->set_endoff(old_jvms->endoff()+1);
5454 old_jvms->set_should_reexecute(true);
5455
5456 sfpt->set_i_o(map()->i_o());
5457 sfpt->set_memory(map()->memory());
5458 sfpt->set_control(map()->control());
5459 return sfpt;
5460 }
5461
5462 // In case of a deoptimization, we restart execution at the
5463 // allocation, allocating a new array. We would leave an uninitialized
5464 // array in the heap that GCs wouldn't expect. Move the allocation
5465 // after the traps so we don't allocate the array if we
5466 // deoptimize. This is possible because tightly_coupled_allocation()
5467 // guarantees there's no observer of the allocated array at this point
5468 // and the control flow is simple enough.
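// Illustrative example (assumed source shape):
//   int[] dst = new int[n];              // tightly coupled allocation
//   System.arraycopy(src, i, dst, 0, n); // guarded intrinsic arraycopy
// If one of the guards emitted for the copy deoptimizes, execution restarts at
// the 'new int[n]' bytecode, so the allocation must not have happened yet;
// hence it is moved below the guards here.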
5469 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5470 int saved_reexecute_sp, uint new_idx) {
5471 if (saved_jvms_before_guards != nullptr && !stopped()) {
5472 replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5473
5474 assert(alloc != nullptr, "only with a tightly coupled allocation");
5475 // restore JVM state to the state at the arraycopy
5476 saved_jvms_before_guards->map()->set_control(map()->control());
5477 assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5478 assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5479 // If we've improved the types of some nodes (null check) while
5480 // emitting the guards, propagate them to the current state
5481 map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5482 set_jvms(saved_jvms_before_guards);
5483 _reexecute_sp = saved_reexecute_sp;
5484
5485 // Remove the allocation from above the guards
5486 CallProjections callprojs;
5487 alloc->extract_projections(&callprojs, true);
5488 InitializeNode* init = alloc->initialization();
5489 Node* alloc_mem = alloc->in(TypeFunc::Memory);
5490 C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5491 C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5492
5493 // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5494 // the allocation (i.e. is only valid if the allocation succeeds):
5495 // 1) replace CastIINode with AllocateArrayNode's length here
5496 // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5497 //
5498 // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate a
5499 // new, separate CastIINode (arraycopy guard checks or any array length use between the array allocation and the arraycopy).
5500 Node* init_control = init->proj_out(TypeFunc::Control);
5501 Node* alloc_length = alloc->Ideal_length();
5502 #ifdef ASSERT
5503 Node* prev_cast = nullptr;
5504 #endif
5505 for (uint i = 0; i < init_control->outcnt(); i++) {
5506 Node* init_out = init_control->raw_out(i);
5507 if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5508 #ifdef ASSERT
5509 if (prev_cast == nullptr) {
5510 prev_cast = init_out;
5511 } else {
5512 if (prev_cast->cmp(*init_out) == false) {
5513 prev_cast->dump();
5514 init_out->dump();
5515 assert(false, "not equal CastIINode");
5516 }
5517 }
5518 #endif
5519 C->gvn_replace_by(init_out, alloc_length);
5520 }
5521 }
5522 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5523
5524 // move the allocation here (after the guards)
5525 _gvn.hash_delete(alloc);
5526 alloc->set_req(TypeFunc::Control, control());
5527 alloc->set_req(TypeFunc::I_O, i_o());
5528 Node *mem = reset_memory();
5529 set_all_memory(mem);
5530 alloc->set_req(TypeFunc::Memory, mem);
5531 set_control(init->proj_out_or_null(TypeFunc::Control));
5532 set_i_o(callprojs.fallthrough_ioproj);
5533
5534 // Update memory as done in GraphKit::set_output_for_allocation()
5535 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5536 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5537 if (ary_type->isa_aryptr() && length_type != nullptr) {
5538 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5539 }
5540 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5541 int elemidx = C->get_alias_index(telemref);
5542 set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5543 set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5544
5545 Node* allocx = _gvn.transform(alloc);
5546 assert(allocx == alloc, "where has the allocation gone?");
5547 assert(dest->is_CheckCastPP(), "not an allocation result?");
5548
5549 _gvn.hash_delete(dest);
5550 dest->set_req(0, control());
5551 Node* destx = _gvn.transform(dest);
5552 assert(destx == dest, "where has the allocation result gone?");
5850 top_src = src_type->isa_aryptr();
5851 has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5852 src_spec = true;
5853 }
5854 if (!has_dest) {
5855 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5856 dest_type = _gvn.type(dest);
5857 top_dest = dest_type->isa_aryptr();
5858 has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5859 dest_spec = true;
5860 }
5861 }
5862 }
5863
5864 if (has_src && has_dest && can_emit_guards) {
5865 BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5866 BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5867 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5868 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5869
5870 if (src_elem == dest_elem && src_elem == T_OBJECT) {
5871 // If both arrays are object arrays then having the exact types
5872 // for both will remove the need for a subtype check at runtime
5873 // before the call and may make it possible to pick a faster copy
5874 // routine (without a subtype check on every element)
5875 // Do we have the exact type of src?
5876 bool could_have_src = src_spec;
5877 // Do we have the exact type of dest?
5878 bool could_have_dest = dest_spec;
5879 ciKlass* src_k = nullptr;
5880 ciKlass* dest_k = nullptr;
5881 if (!src_spec) {
5882 src_k = src_type->speculative_type_not_null();
5883 if (src_k != nullptr && src_k->is_array_klass()) {
5884 could_have_src = true;
5885 }
5886 }
5887 if (!dest_spec) {
5888 dest_k = dest_type->speculative_type_not_null();
5889 if (dest_k != nullptr && dest_k->is_array_klass()) {
5890 could_have_dest = true;
5891 }
5892 }
5893 if (could_have_src && could_have_dest) {
5894 // If we can have both exact types, emit the missing guards
5895 if (could_have_src && !src_spec) {
5896 src = maybe_cast_profiled_obj(src, src_k, true);
5897 }
5898 if (could_have_dest && !dest_spec) {
5899 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5900 }
5901 }
5902 }
5903 }
5904
5905 ciMethod* trap_method = method();
5906 int trap_bci = bci();
5907 if (saved_jvms_before_guards != nullptr) {
5908 trap_method = alloc->jvms()->method();
5909 trap_bci = alloc->jvms()->bci();
5910 }
5911
5912 bool negative_length_guard_generated = false;
5913
5914 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5915 can_emit_guards &&
5916 !src->is_top() && !dest->is_top()) {
5917 // validate arguments: enables transformation of the ArrayCopyNode
5918 validated = true;
5919
5920 RegionNode* slow_region = new RegionNode(1);
5921 record_for_igvn(slow_region);
5922
5923 // (1) src and dest are arrays.
5924 generate_non_array_guard(load_object_klass(src), slow_region, &src);
5925 generate_non_array_guard(load_object_klass(dest), slow_region, &dest);
5926
5927 // (2) src and dest arrays must have elements of the same BasicType
5928 // done at macro expansion or at Ideal transformation time
5929
5930 // (4) src_offset must not be negative.
5931 generate_negative_guard(src_offset, slow_region);
5932
5933 // (5) dest_offset must not be negative.
5934 generate_negative_guard(dest_offset, slow_region);
5935
5936 // (7) src_offset + length must not exceed length of src.
5937 generate_limit_guard(src_offset, length,
5938 load_array_length(src),
5939 slow_region);
5940
5941 // (8) dest_offset + length must not exceed length of dest.
5942 generate_limit_guard(dest_offset, length,
5943 load_array_length(dest),
5944 slow_region);
5945
5946 // (6) length must not be negative.
5947 // This is also checked in generate_arraycopy() during macro expansion, but
5948 // we also have to check it here for the case where the ArrayCopyNode will
5949 // be eliminated by Escape Analysis.
5950 if (EliminateAllocations) {
5951 generate_negative_guard(length, slow_region);
5952 negative_length_guard_generated = true;
5953 }
5954
5955 // (9) each element of an oop array must be assignable
5956 Node* dest_klass = load_object_klass(dest);
5957 if (src != dest) {
5958 Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
5959
5960 if (not_subtype_ctrl != top()) {
5961 PreserveJVMState pjvms(this);
5962 set_control(not_subtype_ctrl);
5963 uncommon_trap(Deoptimization::Reason_intrinsic,
5964 Deoptimization::Action_make_not_entrant);
5965 assert(stopped(), "Should be stopped");
5966 }
5967 }
5968 {
5969 PreserveJVMState pjvms(this);
5970 set_control(_gvn.transform(slow_region));
5971 uncommon_trap(Deoptimization::Reason_intrinsic,
5972 Deoptimization::Action_make_not_entrant);
5973 assert(stopped(), "Should be stopped");
5974 }
5975
5976 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5977 const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
5978 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5979 arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
5980 }
5981
5982 if (stopped()) {
5983 return true;
5984 }
5985
5986 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
5987 // Create LoadRange and LoadKlass nodes for use during macro expansion here
5988 // so the compiler has a chance to eliminate them: during macro expansion,
5989 // we have to set their control (CastPP nodes are eliminated).
5990 load_object_klass(src), load_object_klass(dest),
5991 load_array_length(src), load_array_length(dest));
5992
5993 ac->set_arraycopy(validated);
5994
5995 Node* n = _gvn.transform(ac);
5996 if (n == ac) {
5997 ac->connect_outputs(this);
5998 } else {
24
25 #include "asm/macroAssembler.hpp"
26 #include "ci/ciFlatArrayKlass.hpp"
27 #include "ci/ciUtilities.inline.hpp"
28 #include "ci/ciSymbols.hpp"
29 #include "classfile/vmIntrinsics.hpp"
30 #include "compiler/compileBroker.hpp"
31 #include "compiler/compileLog.hpp"
32 #include "gc/shared/barrierSet.hpp"
33 #include "jfr/support/jfrIntrinsics.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "oops/klass.inline.hpp"
36 #include "oops/objArrayKlass.hpp"
37 #include "opto/addnode.hpp"
38 #include "opto/arraycopynode.hpp"
39 #include "opto/c2compiler.hpp"
40 #include "opto/castnode.hpp"
41 #include "opto/cfgnode.hpp"
42 #include "opto/convertnode.hpp"
43 #include "opto/countbitsnode.hpp"
44 #include "opto/idealKit.hpp"
45 #include "opto/library_call.hpp"
46 #include "opto/mathexactnode.hpp"
299 case vmIntrinsics::_indexOfIL: return inline_string_indexOfI(StrIntrinsicNode::LL);
300 case vmIntrinsics::_indexOfIU: return inline_string_indexOfI(StrIntrinsicNode::UU);
301 case vmIntrinsics::_indexOfIUL: return inline_string_indexOfI(StrIntrinsicNode::UL);
302 case vmIntrinsics::_indexOfU_char: return inline_string_indexOfChar(StrIntrinsicNode::U);
303 case vmIntrinsics::_indexOfL_char: return inline_string_indexOfChar(StrIntrinsicNode::L);
304
305 case vmIntrinsics::_equalsL: return inline_string_equals(StrIntrinsicNode::LL);
306
307 case vmIntrinsics::_vectorizedHashCode: return inline_vectorizedHashCode();
308
309 case vmIntrinsics::_toBytesStringU: return inline_string_toBytesU();
310 case vmIntrinsics::_getCharsStringU: return inline_string_getCharsU();
311 case vmIntrinsics::_getCharStringU: return inline_string_char_access(!is_store);
312 case vmIntrinsics::_putCharStringU: return inline_string_char_access( is_store);
313
314 case vmIntrinsics::_compressStringC:
315 case vmIntrinsics::_compressStringB: return inline_string_copy( is_compress);
316 case vmIntrinsics::_inflateStringC:
317 case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);
318
319 case vmIntrinsics::_makePrivateBuffer: return inline_unsafe_make_private_buffer();
320 case vmIntrinsics::_finishPrivateBuffer: return inline_unsafe_finish_private_buffer();
321 case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false);
322 case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
323 case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false);
324 case vmIntrinsics::_getShort: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, false);
325 case vmIntrinsics::_getChar: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, false);
326 case vmIntrinsics::_getInt: return inline_unsafe_access(!is_store, T_INT, Relaxed, false);
327 case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false);
328 case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false);
329 case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);
330 case vmIntrinsics::_getValue: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false, true);
331
332 case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false);
333 case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
334 case vmIntrinsics::_putByte: return inline_unsafe_access( is_store, T_BYTE, Relaxed, false);
335 case vmIntrinsics::_putShort: return inline_unsafe_access( is_store, T_SHORT, Relaxed, false);
336 case vmIntrinsics::_putChar: return inline_unsafe_access( is_store, T_CHAR, Relaxed, false);
337 case vmIntrinsics::_putInt: return inline_unsafe_access( is_store, T_INT, Relaxed, false);
338 case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false);
339 case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false);
340 case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false);
341 case vmIntrinsics::_putValue: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false, true);
342
343 case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
344 case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
345 case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
346 case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
347 case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_store, T_CHAR, Volatile, false);
348 case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_store, T_INT, Volatile, false);
349 case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_store, T_LONG, Volatile, false);
350 case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_store, T_FLOAT, Volatile, false);
351 case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_store, T_DOUBLE, Volatile, false);
352
353 case vmIntrinsics::_putReferenceVolatile: return inline_unsafe_access( is_store, T_OBJECT, Volatile, false);
354 case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
355 case vmIntrinsics::_putByteVolatile: return inline_unsafe_access( is_store, T_BYTE, Volatile, false);
356 case vmIntrinsics::_putShortVolatile: return inline_unsafe_access( is_store, T_SHORT, Volatile, false);
357 case vmIntrinsics::_putCharVolatile: return inline_unsafe_access( is_store, T_CHAR, Volatile, false);
358 case vmIntrinsics::_putIntVolatile: return inline_unsafe_access( is_store, T_INT, Volatile, false);
359 case vmIntrinsics::_putLongVolatile: return inline_unsafe_access( is_store, T_LONG, Volatile, false);
360 case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access( is_store, T_FLOAT, Volatile, false);
361 case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access( is_store, T_DOUBLE, Volatile, false);
488 "notifyJvmtiEnd", false, true);
489 case vmIntrinsics::_notifyJvmtiVThreadMount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
490 "notifyJvmtiMount", false, false);
491 case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
492 "notifyJvmtiUnmount", false, false);
493 case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
494 #endif
495
496 #ifdef JFR_HAVE_INTRINSICS
497 case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
498 case vmIntrinsics::_getEventWriter: return inline_native_getEventWriter();
499 case vmIntrinsics::_jvm_commit: return inline_native_jvm_commit();
500 #endif
501 case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
502 case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
503 case vmIntrinsics::_writeback0: return inline_unsafe_writeback0();
504 case vmIntrinsics::_writebackPreSync0: return inline_unsafe_writebackSync0(true);
505 case vmIntrinsics::_writebackPostSync0: return inline_unsafe_writebackSync0(false);
506 case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
507 case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
508 case vmIntrinsics::_isFlatArray: return inline_unsafe_isFlatArray();
509 case vmIntrinsics::_setMemory: return inline_unsafe_setMemory();
510 case vmIntrinsics::_getLength: return inline_native_getLength();
511 case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
512 case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
513 case vmIntrinsics::_equalsB: return inline_array_equals(StrIntrinsicNode::LL);
514 case vmIntrinsics::_equalsC: return inline_array_equals(StrIntrinsicNode::UU);
515 case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
516 case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
517 case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());
518
519 case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
520 case vmIntrinsics::_newArray: return inline_unsafe_newArray(false);
521 case vmIntrinsics::_newNullRestrictedNonAtomicArray: return inline_newArray(/* null_free */ true, /* atomic */ false);
522 case vmIntrinsics::_newNullRestrictedAtomicArray: return inline_newArray(/* null_free */ true, /* atomic */ true);
523 case vmIntrinsics::_newNullableAtomicArray: return inline_newArray(/* null_free */ false, /* atomic */ true);
524
525 case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();
526
527 case vmIntrinsics::_isInstance:
528 case vmIntrinsics::_isHidden:
529 case vmIntrinsics::_getSuperclass:
530 case vmIntrinsics::_getClassAccessFlags: return inline_native_Class_query(intrinsic_id());
531
532 case vmIntrinsics::_floatToRawIntBits:
533 case vmIntrinsics::_floatToIntBits:
534 case vmIntrinsics::_intBitsToFloat:
535 case vmIntrinsics::_doubleToRawLongBits:
536 case vmIntrinsics::_doubleToLongBits:
537 case vmIntrinsics::_longBitsToDouble:
538 case vmIntrinsics::_floatToFloat16:
539 case vmIntrinsics::_float16ToFloat: return inline_fp_conversions(intrinsic_id());
540 case vmIntrinsics::_sqrt_float16: return inline_fp16_operations(intrinsic_id(), 1);
541 case vmIntrinsics::_fma_float16: return inline_fp16_operations(intrinsic_id(), 3);
542 case vmIntrinsics::_floatIsFinite:
543 case vmIntrinsics::_floatIsInfinite:
2275 case vmIntrinsics::_remainderUnsigned_l: {
2276 zero_check_long(argument(2));
2277 // Compile-time detection of a division by zero
2278 if (stopped()) {
2279 return true; // keep the graph constructed so far
2280 }
2281 n = new UModLNode(control(), argument(0), argument(2));
2282 break;
2283 }
2284 default: fatal_unexpected_iid(id); break;
2285 }
2286 set_result(_gvn.transform(n));
2287 return true;
2288 }
2289
2290 //----------------------------inline_unsafe_access----------------------------
2291
2292 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2293 // Attempt to infer a sharper value type from the offset and base type.
2294 ciKlass* sharpened_klass = nullptr;
2295 bool null_free = false;
2296
2297 // See if it is an instance field, with an object type.
2298 if (alias_type->field() != nullptr) {
2299 if (alias_type->field()->type()->is_klass()) {
2300 sharpened_klass = alias_type->field()->type()->as_klass();
2301 null_free = alias_type->field()->is_null_free();
2302 }
2303 }
2304
2305 const TypeOopPtr* result = nullptr;
2306 // See if it is a narrow oop array.
2307 if (adr_type->isa_aryptr()) {
2308 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2309 const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2310 null_free = adr_type->is_aryptr()->is_null_free();
2311 if (elem_type != nullptr && elem_type->is_loaded()) {
2312 // Sharpen the value type.
2313 result = elem_type;
2314 }
2315 }
2316 }
2317
2318 // The sharpened class might be unloaded if there is no class loader
2319 // constraint in place.
2320 if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2321 // Sharpen the value type.
2322 result = TypeOopPtr::make_from_klass(sharpened_klass);
2323 if (null_free) {
2324 result = result->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2325 }
2326 }
2327 if (result != nullptr) {
2328 #ifndef PRODUCT
2329 if (C->print_intrinsics() || C->print_inlining()) {
2330 tty->print(" from base type: "); adr_type->dump(); tty->cr();
2331 tty->print(" sharpened value: "); result->dump(); tty->cr();
2332 }
2333 #endif
2334 }
2335 return result;
2336 }
2337
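// Maps the Unsafe access kind to C2 memory-ordering decorators. For example
// (sketch): a getIntVolatile intrinsic arrives here with kind == Volatile and
// gets MO_SEQ_CST, while a plain getInt (Relaxed) gets MO_UNORDERED.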
2338 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2339 switch (kind) {
2340 case Relaxed:
2341 return MO_UNORDERED;
2342 case Opaque:
2343 return MO_RELAXED;
2344 case Acquire:
2345 return MO_ACQUIRE;
2346 case Release:
2347 return MO_RELEASE;
2348 case Volatile:
2349 return MO_SEQ_CST;
2350 default:
2351 ShouldNotReachHere();
2352 return 0;
2353 }
2354 }
2355
2356 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned, const bool is_flat) {
2357 if (callee()->is_static()) return false; // caller must have the capability!
2358 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2359 guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2360 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2361 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2362
2363 if (is_reference_type(type)) {
2364 decorators |= ON_UNKNOWN_OOP_REF;
2365 }
2366
2367 if (unaligned) {
2368 decorators |= C2_UNALIGNED;
2369 }
2370
2371 #ifndef PRODUCT
2372 {
2373 ResourceMark rm;
2374 // Check the signatures.
2375 ciSignature* sig = callee()->signature();
2376 #ifdef ASSERT
2377 if (!is_store) {
2378 // Object getReference(Object base, int/long offset), etc.
2379 BasicType rtype = sig->return_type()->basic_type();
2380 assert(rtype == type, "getter must return the expected value");
2381 assert(sig->count() == 2 || (is_flat && sig->count() == 3), "oop getter has 2 or 3 arguments");
2382 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2383 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2384 } else {
2385 // void putReference(Object base, int/long offset, Object x), etc.
2386 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2387 assert(sig->count() == 3 || (is_flat && sig->count() == 4), "oop putter has 3 arguments");
2388 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2389 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2390 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2391 assert(vtype == type, "putter must accept the expected value");
2392 }
2393 #endif // ASSERT
2394 }
2395 #endif //PRODUCT
2396
2397 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2398
2399 Node* receiver = argument(0); // type: oop
2400
2401 // Build address expression.
2402 Node* heap_base_oop = top();
2403
2404 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2405 Node* base = argument(1); // type: oop
2406 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2407 Node* offset = argument(2); // type: long
2408 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2409 // to be plain byte offsets, which are also the same as those accepted
2410 // by oopDesc::field_addr.
2411 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2412 "fieldOffset must be byte-scaled");
2413
2414 ciInlineKlass* inline_klass = nullptr;
2415 if (is_flat) {
2416 const TypeInstPtr* cls = _gvn.type(argument(4))->isa_instptr();
2417 if (cls == nullptr || cls->const_oop() == nullptr) {
2418 return false;
2419 }
2420 ciType* mirror_type = cls->const_oop()->as_instance()->java_mirror_type();
2421 if (!mirror_type->is_inlinetype()) {
2422 return false;
2423 }
2424 inline_klass = mirror_type->as_inline_klass();
2425 }
2426
2427 if (base->is_InlineType()) {
2428 InlineTypeNode* vt = base->as_InlineType();
2429 if (is_store) {
2430 if (!vt->is_allocated(&_gvn)) {
2431 return false;
2432 }
2433 base = vt->get_oop();
2434 } else {
2435 if (offset->is_Con()) {
2436 long off = find_long_con(offset, 0);
2437 ciInlineKlass* vk = vt->type()->inline_klass();
2438 if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2439 return false;
2440 }
2441
2442 ciField* field = vk->get_non_flat_field_by_offset(off);
2443 if (field != nullptr) {
2444 BasicType bt = type2field[field->type()->basic_type()];
2445 if (bt == T_ARRAY || bt == T_NARROWOOP) {
2446 bt = T_OBJECT;
2447 }
2448 if (bt == type && (!field->is_flat() || field->type() == inline_klass)) {
2449 Node* value = vt->field_value_by_offset(off, false);
2450 if (value->is_InlineType()) {
2451 value = value->as_InlineType()->adjust_scalarization_depth(this);
2452 }
2453 set_result(value);
2454 return true;
2455 }
2456 }
2457 }
2458 {
2459 // Re-execute the unsafe access if allocation triggers deoptimization.
2460 PreserveReexecuteState preexecs(this);
2461 jvms()->set_should_reexecute(true);
2462 vt = vt->buffer(this);
2463 }
2464 base = vt->get_oop();
2465 }
2466 }
2467
2468 // 32-bit machines ignore the high half!
2469 offset = ConvL2X(offset);
2470
2471 // Save state and restore on bailout
2472 uint old_sp = sp();
2473 SafePointNode* old_map = clone_map();
2474
2475 Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2476 assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2477
2478 if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2479 if (type != T_OBJECT && (inline_klass == nullptr || !inline_klass->has_object_fields())) {
2480 decorators |= IN_NATIVE; // off-heap primitive access
2481 } else {
2482 set_map(old_map);
2483 set_sp(old_sp);
2484 return false; // off-heap oop accesses are not supported
2485 }
2486 } else {
2487 heap_base_oop = base; // on-heap or mixed access
2488 }
2489
2490 // Can base be null? Otherwise, always on-heap access.
2491 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2492
2493 if (!can_access_non_heap) {
2494 decorators |= IN_HEAP;
2495 }
2496
2497 Node* val = is_store ? argument(4 + (is_flat ? 1 : 0)) : nullptr;
2498
2499 const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2500 if (adr_type == TypePtr::NULL_PTR) {
2501 set_map(old_map);
2502 set_sp(old_sp);
2503 return false; // off-heap access with zero address
2504 }
2505
2506 // Try to categorize the address.
2507 Compile::AliasType* alias_type = C->alias_type(adr_type);
2508 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2509
2510 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2511 alias_type->adr_type() == TypeAryPtr::RANGE) {
2512 set_map(old_map);
2513 set_sp(old_sp);
2514 return false; // not supported
2515 }
2516
2517 bool mismatched = false;
2518 BasicType bt = T_ILLEGAL;
2519 ciField* field = nullptr;
2520 if (adr_type->isa_instptr()) {
2521 const TypeInstPtr* instptr = adr_type->is_instptr();
2522 ciInstanceKlass* k = instptr->instance_klass();
2523 int off = instptr->offset();
2524 if (instptr->const_oop() != nullptr &&
2525 k == ciEnv::current()->Class_klass() &&
2526 instptr->offset() >= (k->size_helper() * wordSize)) {
2527 k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2528 field = k->get_field_by_offset(off, true);
2529 } else {
2530 field = k->get_non_flat_field_by_offset(off);
2531 }
2532 if (field != nullptr) {
2533 bt = type2field[field->type()->basic_type()];
2534 }
2535 if (bt != alias_type->basic_type()) {
2536 // Type mismatch. Is it an access to a nested flat field?
2537 field = k->get_field_by_offset(off, false);
2538 if (field != nullptr) {
2539 bt = type2field[field->type()->basic_type()];
2540 }
2541 }
2542 assert(bt == alias_type->basic_type() || is_flat, "should match");
2543 } else {
2544 bt = alias_type->basic_type();
2545 }
2546
2547 if (bt != T_ILLEGAL) {
2548 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2549 if (bt == T_BYTE && adr_type->isa_aryptr()) {
2550 // Alias type doesn't differentiate between byte[] and boolean[].
2551 // Use address type to get the element type.
2552 bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2553 }
2554 if (is_reference_type(bt, true)) {
2555 // accessing an array field with getReference is not a mismatch
2556 bt = T_OBJECT;
2557 }
2558 if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2559 // Don't intrinsify mismatched object accesses
2560 set_map(old_map);
2561 set_sp(old_sp);
2562 return false;
2563 }
2564 mismatched = (bt != type);
2565 } else if (alias_type->adr_type()->isa_oopptr()) {
2566 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2567 }
2568
2569 if (is_flat) {
2570 if (adr_type->isa_instptr()) {
2571 if (field == nullptr || field->type() != inline_klass) {
2572 mismatched = true;
2573 }
2574 } else if (adr_type->isa_aryptr()) {
2575 const Type* elem = adr_type->is_aryptr()->elem();
2576 if (!adr_type->is_flat() || elem->inline_klass() != inline_klass) {
2577 mismatched = true;
2578 }
2579 } else {
2580 mismatched = true;
2581 }
2582 if (is_store) {
2583 const Type* val_t = _gvn.type(val);
2584 if (!val_t->is_inlinetypeptr() || val_t->inline_klass() != inline_klass) {
2585 set_map(old_map);
2586 set_sp(old_sp);
2587 return false;
2588 }
2589 }
2590 }
2591
2592 destruct_map_clone(old_map);
2593 assert(!mismatched || is_flat || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2594
2595 if (mismatched) {
2596 decorators |= C2_MISMATCHED;
2597 }
2598
2599 // First guess at the value type.
2600 const Type *value_type = Type::get_const_basic_type(type);
2601
2602 // Figure out the memory ordering.
2603 decorators |= mo_decorator_for_access_kind(kind);
2604
2605 if (!is_store) {
2606 if (type == T_OBJECT && !is_flat) {
2607 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2608 if (tjp != nullptr) {
2609 value_type = tjp;
2610 }
2611 }
2612 }
2613
2614 receiver = null_check(receiver);
2615 if (stopped()) {
2616 return true;
2617 }
2618 // Heap pointers get a null-check from the interpreter,
2619 // as a courtesy. However, this is not guaranteed by Unsafe,
2620 // and it is not possible to fully distinguish unintended nulls
2621 // from intended ones in this API.
2622
2623 if (!is_store) {
2624 Node* p = nullptr;
2625 // Try to constant fold a load from a constant field
2626
2627 if (heap_base_oop != top() && field != nullptr && field->is_constant() && !field->is_flat() && !mismatched) {
2628 // final or stable field
2629 p = make_constant_from_field(field, heap_base_oop);
2630 }
2631
2632 if (p == nullptr) { // Could not constant fold the load
2633 if (is_flat) {
2634 if (adr_type->isa_instptr() && !mismatched) {
2635 ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2636 int offset = adr_type->is_instptr()->offset();
2637 p = InlineTypeNode::make_from_flat(this, inline_klass, base, base, nullptr, holder, offset, false, -1, decorators);
2638 } else {
2639 p = InlineTypeNode::make_from_flat(this, inline_klass, base, adr, nullptr, nullptr, 0, false, -1, decorators);
2640 }
2641 } else {
2642 p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2643 const TypeOopPtr* ptr = value_type->make_oopptr();
2644 if (ptr != nullptr && ptr->is_inlinetypeptr()) {
2645 // Load a non-flattened inline type from memory
2646 p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass());
2647 }
2648 }
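// Illustrative motivation for the normalization below (an assumption about the
// failure mode): a mismatched or potentially off-heap getBoolean may read a raw
// byte holding any value in 0..255, but a Java boolean must be 0 or 1, so any
// non-zero byte (e.g. 2) is canonicalized to 1 by the IdealKit sequence.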
2649 // Normalize the value returned by getBoolean in the following cases
2650 if (type == T_BOOLEAN &&
2651 (mismatched ||
2652 heap_base_oop == top() || // - heap_base_oop is null or
2653 (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2654 // and the unsafe access is made to large offset
2655 // (i.e., larger than the maximum offset necessary for any
2656 // field access)
2657 ) {
2658 IdealKit ideal = IdealKit(this);
2659 #define __ ideal.
2660 IdealVariable normalized_result(ideal);
2661 __ declarations_done();
2662 __ set(normalized_result, p);
2663 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2664 __ set(normalized_result, ideal.ConI(1));
2665 ideal.end_if();
2666 final_sync(ideal);
2667 p = __ value(normalized_result);
2668 #undef __
2669 }
2670 }
2671 if (type == T_ADDRESS) {
2672 p = gvn().transform(new CastP2XNode(nullptr, p));
2673 p = ConvX2UL(p);
2674 }
2675 // The load node has the control of the preceding MemBarCPUOrder. All
2676 // following nodes will have the control of the MemBarCPUOrder inserted at
2677 // the end of this method. So, pushing the load onto the stack at a later
2678 // point is fine.
2679 set_result(p);
2680 } else {
2681 if (bt == T_ADDRESS) {
2682 // Repackage the long as a pointer.
2683 val = ConvL2X(val);
2684 val = gvn().transform(new CastX2PNode(val));
2685 }
2686 if (is_flat) {
2687 if (adr_type->isa_instptr() && !mismatched) {
2688 ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2689 int offset = adr_type->is_instptr()->offset();
2690 val->as_InlineType()->store_flat(this, base, base, nullptr, holder, offset, false, -1, decorators);
2691 } else {
2692 val->as_InlineType()->store_flat(this, base, adr, nullptr, nullptr, 0, false, -1, decorators);
2693 }
2694 } else {
2695 access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2696 }
2697 }
2698
2699 if (argument(1)->is_InlineType() && is_store) {
2700 InlineTypeNode* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(argument(1))->inline_klass());
2701 value = value->make_larval(this, false);
2702 replace_in_map(argument(1), value);
2703 }
2704
2705 return true;
2706 }
2707
2708 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2709 Node* receiver = argument(0);
2710 Node* value = argument(1);
2711
2712 const Type* type = gvn().type(value);
2713 if (!type->is_inlinetypeptr()) {
2714 C->record_method_not_compilable("value passed to Unsafe::makePrivateBuffer is not of a constant value type");
2715 return false;
2716 }
2717
2718 null_check(receiver);
2719 if (stopped()) {
2720 return true;
2721 }
2722
2723 value = null_check(value);
2724 if (stopped()) {
2725 return true;
2726 }
2727
2728 ciInlineKlass* vk = type->inline_klass();
2729 Node* klass = makecon(TypeKlassPtr::make(vk));
2730 Node* obj = new_instance(klass);
2731 AllocateNode::Ideal_allocation(obj)->_larval = true;
2732
2733 assert(value->is_InlineType(), "must be an InlineTypeNode");
2734 value->as_InlineType()->store(this, obj, obj, vk);
2735
2736 set_result(obj);
2737 return true;
2738 }
2739
2740 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2741 Node* receiver = argument(0);
2742 Node* buffer = argument(1);
2743
2744 const Type* type = gvn().type(buffer);
2745 if (!type->is_inlinetypeptr()) {
2746 C->record_method_not_compilable("value passed to Unsafe::finishPrivateBuffer is not of a constant value type");
2747 return false;
2748 }
2749
2750 AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer);
2751 if (alloc == nullptr) {
2752 C->record_method_not_compilable("value passed to Unsafe::finishPrivateBuffer must be allocated by Unsafe::makePrivateBuffer");
2753 return false;
2754 }
2755
2756 null_check(receiver);
2757 if (stopped()) {
2758 return true;
2759 }
2760
2761 // Unset the larval bit in the object header
2762 Node* old_header = make_load(control(), buffer, TypeX_X, TypeX_X->basic_type(), MemNode::unordered, LoadNode::Pinned);
2763 Node* new_header = gvn().transform(new AndXNode(old_header, MakeConX(~markWord::larval_bit_in_place)));
2764 access_store_at(buffer, buffer, type->is_ptr(), new_header, TypeX_X, TypeX_X->basic_type(), MO_UNORDERED | IN_HEAP);
2765
2766 // We must ensure that the buffer is properly published
2767 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
2768 assert(!type->maybe_null(), "result of an allocation should not be null");
2769 set_result(InlineTypeNode::make_from_oop(this, buffer, type->inline_klass(), false));
2770 return true;
2771 }
2772
2773 //----------------------------inline_unsafe_load_store----------------------------
2774 // This method serves a couple of different customers (depending on LoadStoreKind):
2775 //
2776 // LS_cmp_swap:
2777 //
2778 // boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2779 // boolean compareAndSetInt( Object o, long offset, int expected, int x);
2780 // boolean compareAndSetLong( Object o, long offset, long expected, long x);
2781 //
2782 // LS_cmp_swap_weak:
2783 //
2784 // boolean weakCompareAndSetReference( Object o, long offset, Object expected, Object x);
2785 // boolean weakCompareAndSetReferencePlain( Object o, long offset, Object expected, Object x);
2786 // boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2787 // boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2788 //
2789 // boolean weakCompareAndSetInt( Object o, long offset, int expected, int x);
2958 }
2959 case LS_cmp_swap:
2960 case LS_cmp_swap_weak:
2961 case LS_get_add:
2962 break;
2963 default:
2964 ShouldNotReachHere();
2965 }
2966
2967 // Null check receiver.
2968 receiver = null_check(receiver);
2969 if (stopped()) {
2970 return true;
2971 }
2972
2973 int alias_idx = C->get_alias_index(adr_type);
2974
2975 if (is_reference_type(type)) {
2976 decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2977
2978 if (oldval != nullptr && oldval->is_InlineType()) {
2979 // Re-execute the unsafe access if allocation triggers deoptimization.
2980 PreserveReexecuteState preexecs(this);
2981 jvms()->set_should_reexecute(true);
2982 oldval = oldval->as_InlineType()->buffer(this)->get_oop();
2983 }
2984 if (newval != nullptr && newval->is_InlineType()) {
2985 // Re-execute the unsafe access if allocation triggers deoptimization.
2986 PreserveReexecuteState preexecs(this);
2987 jvms()->set_should_reexecute(true);
2988 newval = newval->as_InlineType()->buffer(this)->get_oop();
2989 }
2990
2991 // Transformation of a value which could be a null pointer (CastPP #null)
2992 // could be delayed during Parse (for example, in adjust_map_after_if()).
2993 // Execute the transformation here to avoid barrier generation in such a case.
2994 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2995 newval = _gvn.makecon(TypePtr::NULL_PTR);
2996
2997 if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2998 // Refine the value to a null constant, when it is known to be null
2999 oldval = _gvn.makecon(TypePtr::NULL_PTR);
3000 }
3001 }
3002
3003 Node* result = nullptr;
3004 switch (kind) {
3005 case LS_cmp_exchange: {
3006 result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
3007 oldval, newval, value_type, type, decorators);
3008 break;
3009 }
3010 case LS_cmp_swap_weak:
3157 Deoptimization::Action_make_not_entrant);
3158 }
3159 if (stopped()) {
3160 return true;
3161 }
3162 #endif //INCLUDE_JVMTI
3163
3164 Node* test = nullptr;
3165 if (LibraryCallKit::klass_needs_init_guard(kls)) {
3166 // Note: The argument might still be an illegal value like
3167 // Serializable.class or Object[].class. The runtime will handle it.
3168 // But we must make an explicit check for initialization.
3169 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3170 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3171 // can generate code to load it as an unsigned byte.
3172 Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
3173 Node* bits = intcon(InstanceKlass::fully_initialized);
3174 test = _gvn.transform(new SubINode(inst, bits));
3175 // The 'test' is non-zero if we need to take a slow path.
3176 }
3177 Node* obj = nullptr;
3178 const TypeInstKlassPtr* tkls = _gvn.type(kls)->isa_instklassptr();
3179 if (tkls != nullptr && tkls->instance_klass()->is_inlinetype()) {
3180 obj = InlineTypeNode::make_all_zero(_gvn, tkls->instance_klass()->as_inline_klass())->buffer(this);
3181 } else {
3182 obj = new_instance(kls, test);
3183 }
3184 set_result(obj);
3185 return true;
3186 }
3187
3188 //------------------------inline_native_time_funcs--------------
3189 // inline code for System.currentTimeMillis() and System.nanoTime()
3190 // these have the same type and signature
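// Note on the call shape (derived from the code below): this emits a leaf
// runtime call with no memory effects; the long result occupies two return
// projections, Parms+0 carrying the value and Parms+1 being the mandatory top
// placeholder for the second half, which the ASSERT block checks.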
3191 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3192 const TypeFunc* tf = OptoRuntime::void_long_Type();
3193 const TypePtr* no_memory_effects = nullptr;
3194 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3195 Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3196 #ifdef ASSERT
3197 Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3198 assert(value_top == top(), "second value must be top");
3199 #endif
3200 set_result(value);
3201 return true;
3202 }
3203
3944 Node* thread = _gvn.transform(new ThreadLocalNode());
3945 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3946 Node* thread_obj_handle
3947 = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3948 thread_obj_handle = _gvn.transform(thread_obj_handle);
3949 const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3950 access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3951
3952 // Change the _monitor_owner_id of the JavaThread
3953 Node* tid = load_field_from_object(arr, "tid", "J");
3954 Node* monitor_owner_id_offset = basic_plus_adr(thread, in_bytes(JavaThread::monitor_owner_id_offset()));
3955 store_to_memory(control(), monitor_owner_id_offset, tid, T_LONG, MemNode::unordered, true);
3956
3957 JFR_ONLY(extend_setCurrentThread(thread, arr);)
3958 return true;
3959 }
3960
3961 const Type* LibraryCallKit::scopedValueCache_type() {
3962 ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3963 const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3964 const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS, /* stable= */ false, /* flat= */ false, /* not_flat= */ true, /* not_null_free= */ true);
3965
3966 // Because we create the scopedValue cache lazily we have to make the
3967 // type of the result BotPTR.
3968 bool xk = etype->klass_is_exact();
3969 const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, TypeAryPtr::Offset(0));
3970 return objects_type;
3971 }
3972
3973 Node* LibraryCallKit::scopedValueCache_helper() {
3974 Node* thread = _gvn.transform(new ThreadLocalNode());
3975 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3976 // We cannot use immutable_memory() because we might flip onto a
3977 // different carrier thread, at which point we'll need to use that
3978 // carrier thread's cache.
3979 // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3980 // TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3981 return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3982 }
3983
3984 //------------------------inline_native_scopedValueCache------------------
3985 bool LibraryCallKit::inline_native_scopedValueCache() {
3986 Node* cache_obj_handle = scopedValueCache_helper();
3987 const Type* objects_type = scopedValueCache_type();
3988 set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3989
4073 store_to_memory(control(), pin_count_offset, next_pin_count, T_INT, MemNode::unordered);
4074
4075 // Result of top level CFG and Memory.
4076 RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
4077 record_for_igvn(result_rgn);
4078 PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
4079 record_for_igvn(result_mem);
4080
4081 result_rgn->init_req(_true_path, _gvn.transform(valid_pin_count));
4082 result_rgn->init_req(_false_path, _gvn.transform(continuation_is_null));
4083 result_mem->init_req(_true_path, _gvn.transform(reset_memory()));
4084 result_mem->init_req(_false_path, _gvn.transform(input_memory_state));
4085
4086 // Set output state.
4087 set_control(_gvn.transform(result_rgn));
4088 set_all_memory(_gvn.transform(result_mem));
4089
4090 return true;
4091 }
4092
4093 //-----------------------load_klass_from_mirror_common-------------------------
4094 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
4095 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
4096 // and branch to the given path on the region.
4097 // If never_see_null, take an uncommon trap on null, so we can optimistically
4098 // compile for the non-null case.
4099 // If the region is null, force never_see_null = true.
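// Illustrative example (assumption): for String.class the loaded klass is
// String's InstanceKlass and the null path is not taken; for a primitive mirror
// such as int.class (Integer.TYPE) the klass slot in the mirror is null, so
// control goes to null_path (or traps if never_see_null).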
4100 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
4101 bool never_see_null,
4102 RegionNode* region,
4103 int null_path,
4104 int offset) {
4105 if (region == nullptr) never_see_null = true;
4106 Node* p = basic_plus_adr(mirror, offset);
4107 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4108 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
4109 Node* null_ctl = top();
4110 kls = null_check_oop(kls, &null_ctl, never_see_null);
4111 if (region != nullptr) {
4112 // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
4113 region->init_req(null_path, null_ctl);
4114 } else {
4115 assert(null_ctl == top(), "no loose ends");
4116 }
4117 return kls;
4118 }
4119
4120 //--------------------(inline_native_Class_query helpers)---------------------
4121 // Use this for JVM_ACC_INTERFACE.
4122 // Fall through if (mods & mask) == bits, take the guard otherwise.
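// Usage sketch (taken from the helpers below): generate_interface_guard() calls
// this with modifier_mask == JVM_ACC_INTERFACE and modifier_bits == 0, so control
// falls through for non-interfaces and branches to 'region' when the
// JVM_ACC_INTERFACE bit is set.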
4123 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
4124 ByteSize offset, const Type* type, BasicType bt) {
4125 // Branch around if the given klass has the given modifier bit set.
4126 // Like generate_guard, adds a new path onto the region.
4127 Node* modp = basic_plus_adr(kls, in_bytes(offset));
4128 Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
4129 Node* mask = intcon(modifier_mask);
4130 Node* bits = intcon(modifier_bits);
4131 Node* mbit = _gvn.transform(new AndINode(mods, mask));
4132 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
4133 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
4134 return generate_fair_guard(bol, region);
4135 }
4136
4137 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
4138 return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
4139 Klass::access_flags_offset(), TypeInt::CHAR, T_CHAR);
4140 }
4141
4142 // Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
4143 Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
4144 return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
4145 Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
4146 }
4147
4148 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
4149 return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
4150 }
4151
4152 //-------------------------inline_native_Class_query-------------------
4153 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
4154 const Type* return_type = TypeInt::BOOL;
4155 Node* prim_return_value = top(); // what happens if it's a primitive class?
4156 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4275
4276 case vmIntrinsics::_getClassAccessFlags:
4277 p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
4278 query_value = make_load(nullptr, p, TypeInt::CHAR, T_CHAR, MemNode::unordered);
4279 break;
4280
4281 default:
4282 fatal_unexpected_iid(id);
4283 break;
4284 }
4285
4286 // Fall-through is the normal case of a query to a real class.
4287 phi->init_req(1, query_value);
4288 region->init_req(1, control());
4289
4290 C->set_has_split_ifs(true); // Has chance for split-if optimization
4291 set_result(region, phi);
4292 return true;
4293 }
4294
4295
4296 //-------------------------inline_Class_cast-------------------
4297 bool LibraryCallKit::inline_Class_cast() {
4298 Node* mirror = argument(0); // Class
4299 Node* obj = argument(1);
4300 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4301 if (mirror_con == nullptr) {
4302 return false; // dead path (mirror->is_top()).
4303 }
4304 if (obj == nullptr || obj->is_top()) {
4305 return false; // dead path
4306 }
4307 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4308
4309 // First, see if Class.cast() can be folded statically.
4310 // java_mirror_type() returns non-null for compile-time Class constants.
4311 bool is_null_free_array = false;
4312 ciType* tm = mirror_con->java_mirror_type(&is_null_free_array);
4313 if (tm != nullptr && tm->is_klass() &&
4314 tp != nullptr) {
4315 if (!tp->is_loaded()) {
4316 // Don't use intrinsic when class is not loaded.
4317 return false;
4318 } else {
4319 const TypeKlassPtr* tklass = TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces);
4320 if (is_null_free_array) {
4321 tklass = tklass->is_aryklassptr()->cast_to_null_free();
4322 }
4323 int static_res = C->static_subtype_check(tklass, tp->as_klass_type());
4324 if (static_res == Compile::SSC_always_true) {
4325 // isInstance() is true - fold the code.
4326 set_result(obj);
4327 return true;
4328 } else if (static_res == Compile::SSC_always_false) {
4329 // Don't use intrinsic, have to throw ClassCastException.
4330 // If the reference is null, the non-intrinsic bytecode will
4331 // be optimized appropriately.
4332 return false;
4333 }
4334 }
4335 }
4336
4337   // Bail out of the intrinsic and do normal inlining if the exception path is frequent.
4338 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4339 return false;
4340 }
4341
4342 // Generate dynamic checks.
4343   // Class.cast() is the Java implementation of the _checkcast bytecode.
4344 // Do checkcast (Parse::do_checkcast()) optimizations here.
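// At the Java level, Class.cast is roughly (illustrative sketch):
//   public T cast(Object obj) {
//     if (obj != null && !isInstance(obj))
//       throw new ClassCastException(...);
//     return (T) obj;
//   }
// Below, the ClassCastException path is replaced by an uncommon trap.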
4345
4346 mirror = null_check(mirror);
4347 // If mirror is dead, only null-path is taken.
4348 if (stopped()) {
4349 return true;
4350 }
4351
4352 // Not-subtype or the mirror's klass ptr is nullptr (in case it is a primitive).
4353 enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
4354 RegionNode* region = new RegionNode(PATH_LIMIT);
4355 record_for_igvn(region);
4356
4357 // Now load the mirror's klass metaobject, and null-check it.
4358 // If kls is null, we have a primitive mirror and
4359 // nothing is an instance of a primitive type.
4360 Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4361
4362 Node* res = top();
4363 Node* io = i_o();
4364 Node* mem = merged_memory();
4365 if (!stopped()) {
4366
4367 Node* bad_type_ctrl = top();
4368 // Do checkcast optimizations.
4369 res = gen_checkcast(obj, kls, &bad_type_ctrl);
4370 region->init_req(_bad_type_path, bad_type_ctrl);
4371 }
4372 if (region->in(_prim_path) != top() ||
4373 region->in(_bad_type_path) != top() ||
4374 region->in(_npe_path) != top()) {
4375 // Let Interpreter throw ClassCastException.
4376 PreserveJVMState pjvms(this);
4377 set_control(_gvn.transform(region));
4378 // Set IO and memory because gen_checkcast may override them when buffering inline types
4379 set_i_o(io);
4380 set_all_memory(mem);
4381 uncommon_trap(Deoptimization::Reason_intrinsic,
4382 Deoptimization::Action_maybe_recompile);
4383 }
4384 if (!stopped()) {
4385 set_result(res);
4386 }
4387 return true;
4388 }
4389
4390
4391 //--------------------------inline_native_subtype_check------------------------
4392 // This intrinsic takes the JNI calls out of the heart of
4393 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4394 bool LibraryCallKit::inline_native_subtype_check() {
4395 // Pull both arguments off the stack.
4396 Node* args[2]; // two java.lang.Class mirrors: superc, subc
4397 args[0] = argument(0);
4398 args[1] = argument(1);
4399 Node* klasses[2]; // corresponding Klasses: superk, subk
4400 klasses[0] = klasses[1] = top();
4401
4402 enum {
4403 // A full decision tree on {superc is prim, subc is prim}:
4404 _prim_0_path = 1, // {P,N} => false
4405 // {P,P} & superc!=subc => false
4406 _prim_same_path, // {P,P} & superc==subc => true
4407 _prim_1_path, // {N,P} => false
4408 _ref_subtype_path, // {N,N} & subtype check wins => true
4409 _both_ref_path, // {N,N} & subtype check loses => false
4410 PATH_LIMIT
4411 };
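// In Java terms the decision tree above is roughly (illustrative sketch):
//   boolean isAssignableFrom(Class<?> subc) {       // 'this' is superc
//     if (this.isPrimitive() || subc.isPrimitive())
//       return this == subc;                        // {P,*} / {*,P} cases
//     return <subk is a subtype of superk>;         // {N,N}: subtype check below
//   }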
4412
4413 RegionNode* region = new RegionNode(PATH_LIMIT);
4414 RegionNode* prim_region = new RegionNode(2);
4415 Node* phi = new PhiNode(region, TypeInt::BOOL);
4416 record_for_igvn(region);
4417 record_for_igvn(prim_region);
4418
4419 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
4420 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4421 int class_klass_offset = java_lang_Class::klass_offset();
4422
4423 // First null-check both mirrors and load each mirror's klass metaobject.
4424 int which_arg;
4425 for (which_arg = 0; which_arg <= 1; which_arg++) {
4426 Node* arg = args[which_arg];
4427 arg = null_check(arg);
4428 if (stopped()) break;
4429 args[which_arg] = arg;
4430
4431 Node* p = basic_plus_adr(arg, class_klass_offset);
4432 Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
4433 klasses[which_arg] = _gvn.transform(kls);
4434 }
4435
4436 // Having loaded both klasses, test each for null.
4437 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4438 for (which_arg = 0; which_arg <= 1; which_arg++) {
4439 Node* kls = klasses[which_arg];
4440 Node* null_ctl = top();
4441 kls = null_check_oop(kls, &null_ctl, never_see_null);
4442 if (which_arg == 0) {
4443 prim_region->init_req(1, null_ctl);
4444 } else {
4445 region->init_req(_prim_1_path, null_ctl);
4446 }
4447 if (stopped()) break;
4448 klasses[which_arg] = kls;
4449 }
4450
4451 if (!stopped()) {
4452 // now we have two reference types, in klasses[0..1]
4453 Node* subk = klasses[1]; // the argument to isAssignableFrom
4454 Node* superk = klasses[0]; // the receiver
4455 region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4456 region->set_req(_ref_subtype_path, control());
4457 }
4458
4459 // If both operands are primitive (both klasses null), then
4460 // we must return true when they are identical primitives.
4461 // It is convenient to test this after the first null klass check.
4462 // This path is also used if superc is a value mirror.
4463 set_control(_gvn.transform(prim_region));
4464 if (!stopped()) {
4465 // Since superc is primitive, make a guard for the superc==subc case.
4466 Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4467 Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4468 generate_fair_guard(bol_eq, region);
4469 if (region->req() == PATH_LIMIT+1) {
4470 // A guard was added. If the added guard is taken, superc==subc.
4471 region->swap_edges(PATH_LIMIT, _prim_same_path);
4472 region->del_req(PATH_LIMIT);
4473 }
4474 region->set_req(_prim_0_path, control()); // Not equal after all.
4475 }
4476
4477 // these are the only paths that produce 'true':
4478 phi->set_req(_prim_same_path, intcon(1));
4479 phi->set_req(_ref_subtype_path, intcon(1));
4480
4481 // pull together the cases:
4482 assert(region->req() == PATH_LIMIT, "sane region");
4483 for (uint i = 1; i < region->req(); i++) {
4484 Node* ctl = region->in(i);
4485 if (ctl == nullptr || ctl == top()) {
4486 region->set_req(i, top());
4487 phi ->set_req(i, top());
4488 } else if (phi->in(i) == nullptr) {
4489 phi->set_req(i, intcon(0)); // all other paths produce 'false'
4490 }
4491 }
4492
4493 set_control(_gvn.transform(region));
4494 set_result(_gvn.transform(phi));
4495 return true;
4496 }
4497
4498 //---------------------generate_array_guard_common------------------------
4499 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind, Node** obj) {
4500
4501 if (stopped()) {
4502 return nullptr;
4503 }
4504
4505 // Like generate_guard, adds a new path onto the region.
4506 jint layout_con = 0;
4507 Node* layout_val = get_layout_helper(kls, layout_con);
4508 if (layout_val == nullptr) {
4509     bool query = false;
4510 switch(kind) {
4511 case ObjectArray: query = Klass::layout_helper_is_objArray(layout_con); break;
4512 case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
4513 case TypeArray: query = Klass::layout_helper_is_typeArray(layout_con); break;
4514 case AnyArray: query = Klass::layout_helper_is_array(layout_con); break;
4515 case NonArray: query = !Klass::layout_helper_is_array(layout_con); break;
4516 default:
4517 ShouldNotReachHere();
4518 }
4519 if (!query) {
4520 return nullptr; // never a branch
4521 } else { // always a branch
4522 Node* always_branch = control();
4523 if (region != nullptr)
4524 region->add_req(always_branch);
4525 set_control(top());
4526 return always_branch;
4527 }
4528 }
4529 unsigned int value = 0;
4530 BoolTest::mask btest = BoolTest::illegal;
4531 switch(kind) {
4532 case ObjectArray:
4533 case NonObjectArray: {
4534 value = Klass::_lh_array_tag_obj_value;
4535 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4536 btest = (kind == ObjectArray) ? BoolTest::eq : BoolTest::ne;
4537 break;
4538 }
4539 case TypeArray: {
4540 value = Klass::_lh_array_tag_type_value;
4541 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4542 btest = BoolTest::eq;
4543 break;
4544 }
4545 case AnyArray: value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
4546 case NonArray: value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
4547 default:
4548 ShouldNotReachHere();
4549 }
4550 // Now test the correct condition.
4551 jint nval = (jint)value;
4552 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4553 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4554 Node* ctrl = generate_fair_guard(bol, region);
4555 Node* is_array_ctrl = kind == NonArray ? control() : ctrl;
4556 if (obj != nullptr && is_array_ctrl != nullptr && is_array_ctrl != top()) {
4557 // Keep track of the fact that 'obj' is an array to prevent
4558     // array-specific accesses from floating above the guard.
4559 Node* cast = _gvn.transform(new CastPPNode(is_array_ctrl, *obj, TypeAryPtr::BOTTOM));
4560 // Check for top because in rare cases, the type system can determine that
4561 // the object can't be an array but the layout helper check is not folded.
4562 if (!cast->is_top()) {
4563 *obj = cast;
4564 }
4565 }
4566 return ctrl;
4567 }
4568
4569 // public static native Object[] newNullRestrictedAtomicArray(Class<?> componentType, int length, Object initVal);
4570 // public static native Object[] newNullRestrictedNonAtomicArray(Class<?> componentType, int length, Object initVal);
4571 // public static native Object[] newNullableAtomicArray(Class<?> componentType, int length);
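// Layout selection below, summarized (illustrative; all flat cases additionally
// require vk->flat_in_array()):
//   null-free + atomic      -> flat only if vk->has_atomic_layout()
//   null-free + non-atomic  -> flat only if vk->has_non_atomic_layout()
//   nullable  + atomic      -> flat only if vk->has_nullable_atomic_layout()
// otherwise the array is created with a non-flat (reference) layout.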
4572 bool LibraryCallKit::inline_newArray(bool null_free, bool atomic) {
4573 assert(null_free || atomic, "nullable implies atomic");
4574 Node* componentType = argument(0);
4575 Node* length = argument(1);
4576 Node* init_val = null_free ? argument(2) : nullptr;
4577
4578 const TypeInstPtr* tp = _gvn.type(componentType)->isa_instptr();
4579 if (tp != nullptr) {
4580 ciInstanceKlass* ik = tp->instance_klass();
4581 if (ik == C->env()->Class_klass()) {
4582 ciType* t = tp->java_mirror_type();
4583 if (t != nullptr && t->is_inlinetype()) {
4584 ciInlineKlass* vk = t->as_inline_klass();
4585 bool flat = vk->flat_in_array();
4586 if (flat && atomic) {
4587 // Only flat if we have a corresponding atomic layout
4588 flat = null_free ? vk->has_atomic_layout() : vk->has_nullable_atomic_layout();
4589 }
4590 // TODO 8350865 refactor
4591 if (flat && !atomic) {
4592 flat = vk->has_non_atomic_layout();
4593 }
4594
4595         // TODO 8350865 ZGC needs card marks on initializing oop stores
4596 if (UseZGC && null_free && !flat) {
4597 return false;
4598 }
4599
4600 ciArrayKlass* array_klass = ciArrayKlass::make(t, flat, null_free, atomic);
4601 if (array_klass->is_loaded() && array_klass->element_klass()->as_inline_klass()->is_initialized()) {
4602 const TypeAryKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass, Type::trust_interfaces)->is_aryklassptr();
4603 if (null_free) {
4604 if (init_val->is_InlineType()) {
4605 if (array_klass_type->is_flat() && init_val->as_InlineType()->is_all_zero(&gvn(), /* flat */ true)) {
4606 // Zeroing is enough because the init value is the all-zero value
4607 init_val = nullptr;
4608 } else {
4609 init_val = init_val->as_InlineType()->buffer(this);
4610 }
4611 }
4612 // TODO 8350865 Should we add a check of the init_val type (maybe in debug only + halt)?
4613 }
4614 Node* obj = new_array(makecon(array_klass_type), length, 0, nullptr, false, init_val);
4615 const TypeAryPtr* arytype = gvn().type(obj)->is_aryptr();
4616 assert(arytype->is_null_free() == null_free, "inconsistency");
4617 assert(arytype->is_not_null_free() == !null_free, "inconsistency");
4618 assert(arytype->is_flat() == flat, "inconsistency");
4619 assert(arytype->is_aryptr()->is_not_flat() == !flat, "inconsistency");
4620 set_result(obj);
4621 return true;
4622 }
4623 }
4624 }
4625 }
4626 return false;
4627 }
4628
4629 //-----------------------inline_native_newArray--------------------------
4630 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4631 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4632 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4633 Node* mirror;
4634 Node* count_val;
4635 if (uninitialized) {
4636 null_check_receiver();
4637 mirror = argument(1);
4638 count_val = argument(2);
4639 } else {
4640 mirror = argument(0);
4641 count_val = argument(1);
4642 }
4643
4644 mirror = null_check(mirror);
4645 // If mirror or obj is dead, only null-path is taken.
4646 if (stopped()) return true;
4647
4648 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4649 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4650 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4756 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4757 { PreserveReexecuteState preexecs(this);
4758 jvms()->set_should_reexecute(true);
4759
4760 array_type_mirror = null_check(array_type_mirror);
4761 original = null_check(original);
4762
4763 // Check if a null path was taken unconditionally.
4764 if (stopped()) return true;
4765
4766 Node* orig_length = load_array_length(original);
4767
4768 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4769 klass_node = null_check(klass_node);
4770
4771 RegionNode* bailout = new RegionNode(1);
4772 record_for_igvn(bailout);
4773
4774 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4775 // Bail out if that is so.
4776 // Inline type array may have object field that would require a
4777 // write barrier. Conservatively, go to slow path.
4778 // TODO 8251971: Optimize for the case when flat src/dst are later found
4779 // to not contain oops (i.e., move this check to the macro expansion phase).
4780 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4781 const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
4782 const TypeKlassPtr* tklass = _gvn.type(klass_node)->is_klassptr();
4783 bool exclude_flat = UseArrayFlattening && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
4784 // Can src array be flat and contain oops?
4785 (orig_t == nullptr || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
4786 // Can dest array be flat and contain oops?
4787 tklass->can_be_inline_array() && (!tklass->is_flat() || tklass->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->as_inline_klass()->contains_oops());
4788 Node* not_objArray = exclude_flat ? generate_non_objArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
4789 if (not_objArray != nullptr) {
4790 // Improve the klass node's type from the new optimistic assumption:
4791 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4792 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
4793 Node* cast = new CastPPNode(control(), klass_node, akls);
4794 klass_node = _gvn.transform(cast);
4795 }
4796
4797 // Bail out if either start or end is negative.
4798 generate_negative_guard(start, bailout, &start);
4799 generate_negative_guard(end, bailout, &end);
4800
4801 Node* length = end;
4802 if (_gvn.type(start) != TypeInt::ZERO) {
4803 length = _gvn.transform(new SubINode(end, start));
4804 }
4805
4806 // Bail out if length is negative (i.e., if start > end).
4807     // Without this, new_array would throw a
4808     // NegativeArraySizeException, but an IllegalArgumentException is what
4809     // should be thrown.
4810 generate_negative_guard(length, bailout, &length);
4811
4812 // Handle inline type arrays
4813 bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
4814 if (!stopped()) {
4815 // TODO JDK-8329224
4816 if (!orig_t->is_null_free()) {
4817 // Not statically known to be null free, add a check
4818 generate_fair_guard(null_free_array_test(original), bailout);
4819 }
4820 orig_t = _gvn.type(original)->isa_aryptr();
4821 if (orig_t != nullptr && orig_t->is_flat()) {
4822 // Src is flat, check that dest is flat as well
4823 if (exclude_flat) {
4824 // Dest can't be flat, bail out
4825 bailout->add_req(control());
4826 set_control(top());
4827 } else {
4828 generate_fair_guard(flat_array_test(klass_node, /* flat = */ false), bailout);
4829 }
4830 // TODO 8350865 This is not correct anymore. Write tests and fix logic similar to arraycopy.
4831 } else if (UseArrayFlattening && (orig_t == nullptr || !orig_t->is_not_flat()) &&
4832 // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
4833 ((!tklass->is_flat() && tklass->can_be_inline_array()) || !can_validate)) {
4834 // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
4835 // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
4836 generate_fair_guard(flat_array_test(load_object_klass(original)), bailout);
4837 if (orig_t != nullptr) {
4838 orig_t = orig_t->cast_to_not_flat();
4839 original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
4840 }
4841 }
4842 if (!can_validate) {
4843 // No validation. The subtype check emitted at macro expansion time will not go to the slow
4844         // path but will call checkcast_arraycopy, which cannot handle flat/null-free inline type arrays.
4845 // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
4846 generate_fair_guard(flat_array_test(klass_node), bailout);
4847 generate_fair_guard(null_free_array_test(original), bailout);
4848 }
4849 }
4850
4851 // Bail out if start is larger than the original length
4852 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4853 generate_negative_guard(orig_tail, bailout, &orig_tail);
4854
4855 if (bailout->req() > 1) {
4856 PreserveJVMState pjvms(this);
4857 set_control(_gvn.transform(bailout));
4858 uncommon_trap(Deoptimization::Reason_intrinsic,
4859 Deoptimization::Action_maybe_recompile);
4860 }
4861
4862 if (!stopped()) {
4863 // How many elements will we copy from the original?
4864 // The answer is MinI(orig_tail, length).
4865 Node* moved = _gvn.transform(new MinINode(orig_tail, length));
4866
4867 // Generate a direct call to the right arraycopy function(s).
4868 // We know the copy is disjoint but we might not know if the
4869 // oop stores need checking.
4870 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
4876 // to the copyOf to be validated, including that the copy to the
4877 // new array won't trigger an ArrayStoreException. That subtype
4878 // check can be optimized if we know something on the type of
4879 // the input array from type speculation.
4880 if (_gvn.type(klass_node)->singleton()) {
4881 const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4882 const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4883
4884 int test = C->static_subtype_check(superk, subk);
4885 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4886 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4887 if (t_original->speculative_type() != nullptr) {
4888 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4889 }
4890 }
4891 }
4892
4893 bool validated = false;
4894 // Reason_class_check rather than Reason_intrinsic because we
4895 // want to intrinsify even if this traps.
4896 if (can_validate) {
4897 Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4898
4899 if (not_subtype_ctrl != top()) {
4900 PreserveJVMState pjvms(this);
4901 set_control(not_subtype_ctrl);
4902 uncommon_trap(Deoptimization::Reason_class_check,
4903 Deoptimization::Action_make_not_entrant);
4904 assert(stopped(), "Should be stopped");
4905 }
4906 validated = true;
4907 }
4908
4909 if (!stopped()) {
4910 newcopy = new_array(klass_node, length, 0); // no arguments to push
4911
4912 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
4913 load_object_klass(original), klass_node);
4914 if (!is_copyOfRange) {
4915 ac->set_copyof(validated);
4916 } else {
4962
4963 //-----------------------generate_method_call----------------------------
4964 // Use generate_method_call to make a slow-call to the real
4965 // method if the fast path fails. An alternative would be to
4966 // use a stub like OptoRuntime::slow_arraycopy_Java.
4967 // This only works for expanding the current library call,
4968 // not another intrinsic. (E.g., don't use this for making an
4969 // arraycopy call inside of the copyOf intrinsic.)
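// Typical use from an intrinsic's slow path (illustrative sketch):
//   CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_hashCode,
//                                                  /*is_virtual*/ true,
//                                                  /*is_static*/  false,
//                                                  /*res_not_null*/ false);
//   Node* slow_result = set_results_for_java_call(slow_call);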
4970 CallJavaNode*
4971 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4972 // When compiling the intrinsic method itself, do not use this technique.
4973 guarantee(callee() != C->method(), "cannot make slow-call to self");
4974
4975 ciMethod* method = callee();
4976 // ensure the JVMS we have will be correct for this call
4977 guarantee(method_id == method->intrinsic_id(), "must match");
4978
4979 const TypeFunc* tf = TypeFunc::make(method);
4980 if (res_not_null) {
4981 assert(tf->return_type() == T_OBJECT, "");
4982 const TypeTuple* range = tf->range_cc();
4983 const Type** fields = TypeTuple::fields(range->cnt());
4984 fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4985 const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4986 tf = TypeFunc::make(tf->domain_cc(), new_range);
4987 }
4988 CallJavaNode* slow_call;
4989 if (is_static) {
4990 assert(!is_virtual, "");
4991 slow_call = new CallStaticJavaNode(C, tf,
4992 SharedRuntime::get_resolve_static_call_stub(), method);
4993 } else if (is_virtual) {
4994 assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4995 int vtable_index = Method::invalid_vtable_index;
4996 if (UseInlineCaches) {
4997 // Suppress the vtable call
4998 } else {
4999       // hashCode and clone are not miranda methods,
5000 // so the vtable index is fixed.
5001 // No need to use the linkResolver to get it.
5002 vtable_index = method->vtable_index();
5003 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
5004 "bad index %d", vtable_index);
5005 }
5006 slow_call = new CallDynamicJavaNode(tf,
5023 set_edges_for_java_call(slow_call);
5024 return slow_call;
5025 }
5026
5027
5028 /**
5029 * Build special case code for calls to hashCode on an object. This call may
5030 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
5031 * slightly different code.
5032 */
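// Rough shape of the generated code (illustrative sketch, assuming the hash is
// cached in the object's mark word):
//   if (obj == null)                   return 0;   // identityHashCode only
//   if (mark is locked/monitor/inline type)        goto slow path
//   hash = (int)(mark >> shift) & mask;
//   if (hash == markWord::no_hash)                 goto slow path
//   return hash;                                   // fast path
// where the slow path is an out-of-line call to the real (identity)hashCode.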
5033 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
5034 assert(is_static == callee()->is_static(), "correct intrinsic selection");
5035 assert(!(is_virtual && is_static), "either virtual, special, or static");
5036
5037 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
5038
5039 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5040 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
5041 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
5042 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5043 Node* obj = argument(0);
5044
5045 // Don't intrinsify hashcode on inline types for now.
5046 // The "is locked" runtime check below also serves as inline type check and goes to the slow path.
5047 if (gvn().type(obj)->is_inlinetypeptr()) {
5048 return false;
5049 }
5050
5051 if (!is_static) {
5052 // Check for hashing null object
5053 obj = null_check_receiver();
5054 if (stopped()) return true; // unconditionally null
5055 result_reg->init_req(_null_path, top());
5056 result_val->init_req(_null_path, top());
5057 } else {
5058 // Do a null check, and return zero if null.
5059 // System.identityHashCode(null) == 0
5060 Node* null_ctl = top();
5061 obj = null_check_oop(obj, &null_ctl);
5062 result_reg->init_req(_null_path, null_ctl);
5063 result_val->init_req(_null_path, _gvn.intcon(0));
5064 }
5065
5066 // Unconditionally null? Then return right away.
5067 if (stopped()) {
5068 set_control( result_reg->in(_null_path));
5069 if (!stopped())
5070 set_result(result_val->in(_null_path));
5071 return true;
5072 }
5073
5074 // We only go to the fast case code if we pass a number of guards. The
5075 // paths which do not pass are accumulated in the slow_region.
5076 RegionNode* slow_region = new RegionNode(1);
5077 record_for_igvn(slow_region);
5078
5079 // If this is a virtual call, we generate a funny guard. We pull out
5080 // the vtable entry corresponding to hashCode() from the target object.
5081 // If the target method which we are calling happens to be the native
5082 // Object hashCode() method, we pass the guard. We do not need this
5083 // guard for non-virtual calls -- the caller is known to be the native
5084 // Object hashCode().
5085 if (is_virtual) {
5086 // After null check, get the object's klass.
5087 Node* obj_klass = load_object_klass(obj);
5088 generate_virtual_guard(obj_klass, slow_region);
5089 }
5090
5091 // Get the header out of the object, use LoadMarkNode when available
5092 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
5093 // The control of the load must be null. Otherwise, the load can move before
5094 // the null check after castPP removal.
5095 Node* no_ctrl = nullptr;
5096 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
5097
5098 if (!UseObjectMonitorTable) {
5099 // Test the header to see if it is safe to read w.r.t. locking.
5100     // This also serves as a guard against inline types
5101 Node *lock_mask = _gvn.MakeConX(markWord::inline_type_mask_in_place);
5102 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
5103 if (LockingMode == LM_LIGHTWEIGHT) {
5104 Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
5105 Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
5106 Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
5107
5108 generate_slow_guard(test_monitor, slow_region);
5109 } else {
5110 Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
5111 Node *chk_unlocked = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
5112 Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
5113
5114 generate_slow_guard(test_not_unlocked, slow_region);
5115 }
5116 }
5117
5118 // Get the hash value and check to see that it has been properly assigned.
5119 // We depend on hash_mask being at most 32 bits and avoid the use of
5120 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
5121 // vm: see markWord.hpp.
5156 // this->control() comes from set_results_for_java_call
5157 result_reg->init_req(_slow_path, control());
5158 result_val->init_req(_slow_path, slow_result);
5159 result_io ->set_req(_slow_path, i_o());
5160 result_mem ->set_req(_slow_path, reset_memory());
5161 }
5162
5163 // Return the combined state.
5164 set_i_o( _gvn.transform(result_io) );
5165 set_all_memory( _gvn.transform(result_mem));
5166
5167 set_result(result_reg, result_val);
5168 return true;
5169 }
5170
5171 //---------------------------inline_native_getClass----------------------------
5172 // public final native Class<?> java.lang.Object.getClass();
5173 //
5174 // Build special case code for calls to getClass on an object.
5175 bool LibraryCallKit::inline_native_getClass() {
5176 Node* obj = argument(0);
5177 if (obj->is_InlineType()) {
5178 const Type* t = _gvn.type(obj);
5179 if (t->maybe_null()) {
5180 null_check(obj);
5181 }
5182 set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
5183 return true;
5184 }
5185 obj = null_check_receiver();
5186 if (stopped()) return true;
5187 set_result(load_mirror_from_klass(load_object_klass(obj)));
5188 return true;
5189 }
5190
5191 //-----------------inline_native_Reflection_getCallerClass---------------------
5192 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
5193 //
5194 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
5195 //
5196 // NOTE: This code must perform the same logic as JVM_GetCallerClass
5197 // in that it must skip particular security frames and check for
5198 // caller-sensitive methods.
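// Conceptually (illustrative), walking the inlined JVMState chain outward:
//   depth 0: sun.reflect.Reflection.getCallerClass  -- this intrinsified call
//   depth 1: the @CallerSensitive method that called it
//   depth 2: the frame whose declaring class is returned
// If fewer frames are visible through inlining, the intrinsic bails out and the
// native implementation is used instead.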
5199 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
5200 #ifndef PRODUCT
5201 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
5202 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
5203 }
5204 #endif
5205
5517 dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5518
5519 flags |= RC_NARROW_MEM; // narrow in memory
5520 }
5521
5522 // Call it. Note that the length argument is not scaled.
5523 make_runtime_call(flags,
5524 OptoRuntime::unsafe_setmemory_Type(),
5525 StubRoutines::unsafe_setmemory(),
5526 "unsafe_setmemory",
5527 dst_type,
5528 dst_addr, size XTOP, byte);
5529
5530 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, MemNode::unordered);
5531
5532 return true;
5533 }
5534
5535 #undef XTOP
5536
5537 //----------------------inline_unsafe_isFlatArray------------------------
5538 // public native boolean Unsafe.isFlatArray(Class<?> arrayClass);
5539 // This intrinsic exploits assumptions made by the native implementation
5540 // (arrayClass is neither null nor primitive) to avoid unnecessary null checks.
5541 bool LibraryCallKit::inline_unsafe_isFlatArray() {
5542 Node* cls = argument(1);
5543 Node* p = basic_plus_adr(cls, java_lang_Class::klass_offset());
5544 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p,
5545 TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT));
5546 Node* result = flat_array_test(kls);
5547 set_result(result);
5548 return true;
5549 }
5550
5551 //------------------------copy_to_clone-----------------------------------
5552 // Helper function for inline_native_clone.
5553 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5554 assert(obj_size != nullptr, "");
5555 Node* raw_obj = alloc_obj->in(1);
5556 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5557
5558 AllocateNode* alloc = nullptr;
5559 if (ReduceBulkZeroing &&
5560 // If we are implementing an array clone without knowing its source type
5561 // (can happen when compiling the array-guarded branch of a reflective
5562 // Object.clone() invocation), initialize the array within the allocation.
5563 // This is needed because some GCs (e.g. ZGC) might fall back in this case
5564 // to a runtime clone call that assumes fully initialized source arrays.
5565 (!is_array || obj->get_ptr_type()->isa_aryptr() != nullptr)) {
5566 // We will be completely responsible for initializing this object -
5567 // mark Initialize node as complete.
5568 alloc = AllocateNode::Ideal_allocation(alloc_obj);
5569     // The object was just allocated - there should not be any stores!
5570 guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
5601 // not cloneable or finalizer => slow path to out-of-line Object.clone
5602 //
5603 // The general case has two steps, allocation and copying.
5604 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5605 //
5606 // Copying also has two cases, oop arrays and everything else.
5607 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5608 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5609 //
5610 // These steps fold up nicely if and when the cloned object's klass
5611 // can be sharply typed as an object array, a type array, or an instance.
5612 //
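// Rough shape of the paths below (illustrative):
//   obj is an oop array     -> new_array + arrayof_oop_arraycopy   (_objArray_path)
//   obj is any other array  -> new_array + raw word copy           (_array_path)
//   cloneable instance      -> new_instance + raw word copy        (_instance_path)
//   otherwise               -> out-of-line call to the clone method (_slow_path)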
5613 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5614 PhiNode* result_val;
5615
5616 // Set the reexecute bit for the interpreter to reexecute
5617 // the bytecode that invokes Object.clone if deoptimization happens.
5618 { PreserveReexecuteState preexecs(this);
5619 jvms()->set_should_reexecute(true);
5620
5621 Node* obj = argument(0);
5622 obj = null_check_receiver();
5623 if (stopped()) return true;
5624
5625 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5626 if (obj_type->is_inlinetypeptr()) {
5627 // If the object to clone is an inline type, we can simply return it (i.e. a nop) since inline types have
5628 // no identity.
5629 set_result(obj);
5630 return true;
5631 }
5632
5633 // If we are going to clone an instance, we need its exact type to
5634 // know the number and types of fields to convert the clone to
5635 // loads/stores. Maybe a speculative type can help us.
5636 if (!obj_type->klass_is_exact() &&
5637 obj_type->speculative_type() != nullptr &&
5638 obj_type->speculative_type()->is_instance_klass() &&
5639 !obj_type->speculative_type()->is_inlinetype()) {
5640 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5641 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5642 !spec_ik->has_injected_fields()) {
5643 if (!obj_type->isa_instptr() ||
5644 obj_type->is_instptr()->instance_klass()->has_subklass()) {
5645 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5646 }
5647 }
5648 }
5649
5650 // Conservatively insert a memory barrier on all memory slices.
5651 // Do not let writes into the original float below the clone.
5652 insert_mem_bar(Op_MemBarCPUOrder);
5653
5654 // paths into result_reg:
5655 enum {
5656 _slow_path = 1, // out-of-line call to clone method (virtual or not)
5657 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
5658 _array_path, // plain array allocation, plus arrayof_long_arraycopy
5659 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
5660 PATH_LIMIT
5661 };
5662 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5663 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5664 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
5665 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5666 record_for_igvn(result_reg);
5667
5668 Node* obj_klass = load_object_klass(obj);
5669 // We only go to the fast case code if we pass a number of guards.
5670 // The paths which do not pass are accumulated in the slow_region.
5671 RegionNode* slow_region = new RegionNode(1);
5672 record_for_igvn(slow_region);
5673
5674 Node* array_obj = obj;
5675 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr, &array_obj);
5676 if (array_ctl != nullptr) {
5677 // It's an array.
5678 PreserveJVMState pjvms(this);
5679 set_control(array_ctl);
5680
5681 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5682 const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
5683 if (UseArrayFlattening && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
5684 obj_type->can_be_inline_array() &&
5685 (ary_ptr == nullptr || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
5686 // Flat inline type array may have object field that would require a
5687 // write barrier. Conservatively, go to slow path.
5688 generate_fair_guard(flat_array_test(obj_klass), slow_region);
5689 }
5690
5691 if (!stopped()) {
5692 Node* obj_length = load_array_length(array_obj);
5693 Node* array_size = nullptr; // Size of the array without object alignment padding.
5694 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5695
5696 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5697 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5698 // If it is an oop array, it requires very special treatment,
5699 // because gc barriers are required when accessing the array.
5700 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5701 if (is_obja != nullptr) {
5702 PreserveJVMState pjvms2(this);
5703 set_control(is_obja);
5704 // Generate a direct call to the right arraycopy function(s).
5705 // Clones are always tightly coupled.
5706 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, array_obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5707 ac->set_clone_oop_array();
5708 Node* n = _gvn.transform(ac);
5709 assert(n == ac, "cannot disappear");
5710 ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5711
5712 result_reg->init_req(_objArray_path, control());
5713 result_val->init_req(_objArray_path, alloc_obj);
5714 result_i_o ->set_req(_objArray_path, i_o());
5715 result_mem ->set_req(_objArray_path, reset_memory());
5716 }
5717 }
5718 // Otherwise, there are no barriers to worry about.
5719 // (We can dispense with card marks if we know the allocation
5720 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
5721 // causes the non-eden paths to take compensating steps to
5722 // simulate a fresh allocation, so that no further
5723 // card marks are required in compiled code to initialize
5724 // the object.)
5725
5726 if (!stopped()) {
5727 copy_to_clone(obj, alloc_obj, array_size, true);
5728
5729 // Present the results of the copy.
5730 result_reg->init_req(_array_path, control());
5731 result_val->init_req(_array_path, alloc_obj);
5732 result_i_o ->set_req(_array_path, i_o());
5733 result_mem ->set_req(_array_path, reset_memory());
5734 }
5735 }
5736 }
5737
5738 if (!stopped()) {
5739 // It's an instance (we did array above). Make the slow-path tests.
5740 // If this is a virtual call, we generate a funny guard. We grab
5741 // the vtable entry corresponding to clone() from the target object.
5742 // If the target method which we are calling happens to be the
5743 // Object clone() method, we pass the guard. We do not need this
5744 // guard for non-virtual calls; the caller is known to be the native
5745 // Object clone().
5746 if (is_virtual) {
5747 generate_virtual_guard(obj_klass, slow_region);
5748 }
5749
5750 // The object must be easily cloneable and must not have a finalizer.
5751 // Both of these conditions may be checked in a single test.
5752 // We could optimize the test further, but we don't care.
5753 generate_misc_flags_guard(obj_klass,
5754 // Test both conditions:
5755 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
5756 // Must be cloneable but not finalizer:
5757 KlassFlags::_misc_is_cloneable_fast,
5849 set_jvms(sfpt->jvms());
5850 _reexecute_sp = jvms()->sp();
5851
5852 return saved_jvms;
5853 }
5854 }
5855 }
5856 return nullptr;
5857 }
5858
5859 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5860 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
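// Resulting expression stack at the new safepoint (illustrative):
//   null-free array allocation:   ..., componentType, length, initVal
//   all other array allocations:  ..., length
// so that on deoptimization the interpreter re-executes the allocating call
// with the arguments it expects.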
5861 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5862 JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5863 uint size = alloc->req();
5864 SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5865 old_jvms->set_map(sfpt);
5866 for (uint i = 0; i < size; i++) {
5867 sfpt->init_req(i, alloc->in(i));
5868 }
5869 int adjustment = 1;
5870 const TypeAryKlassPtr* ary_klass_ptr = alloc->in(AllocateNode::KlassNode)->bottom_type()->is_aryklassptr();
5871 if (ary_klass_ptr->is_null_free()) {
5872 // A null-free, tightly coupled array allocation can only come from LibraryCallKit::inline_newArray which
5873 // also requires the componentType and initVal on stack for re-execution.
5874 // Re-create and push the componentType.
5875 ciArrayKlass* klass = ary_klass_ptr->exact_klass()->as_array_klass();
5876 ciInstance* instance = klass->component_mirror_instance();
5877 const TypeInstPtr* t_instance = TypeInstPtr::make(instance);
5878 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), makecon(t_instance));
5879 adjustment++;
5880 }
5881 // re-push array length for deoptimization
5882 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment - 1, alloc->in(AllocateNode::ALength));
5883 if (ary_klass_ptr->is_null_free()) {
5884 // Re-create and push the initVal.
5885 Node* init_val = alloc->in(AllocateNode::InitValue);
5886 if (init_val == nullptr) {
5887 init_val = InlineTypeNode::make_all_zero(_gvn, ary_klass_ptr->elem()->is_instklassptr()->instance_klass()->as_inline_klass());
5888 } else if (UseCompressedOops) {
5889 init_val = _gvn.transform(new DecodeNNode(init_val, init_val->bottom_type()->make_ptr()));
5890 }
5891 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment, init_val);
5892 adjustment++;
5893 }
5894 old_jvms->set_sp(old_jvms->sp() + adjustment);
5895 old_jvms->set_monoff(old_jvms->monoff() + adjustment);
5896 old_jvms->set_scloff(old_jvms->scloff() + adjustment);
5897 old_jvms->set_endoff(old_jvms->endoff() + adjustment);
5898 old_jvms->set_should_reexecute(true);
5899
5900 sfpt->set_i_o(map()->i_o());
5901 sfpt->set_memory(map()->memory());
5902 sfpt->set_control(map()->control());
5903 return sfpt;
5904 }
5905
5906 // In case of a deoptimization, we restart execution at the
5907 // allocation, allocating a new array. We would leave an uninitialized
5908 // array in the heap that GCs wouldn't expect. Move the allocation
5909 // after the traps so we don't allocate the array if we
5910 // deoptimize. This is possible because tightly_coupled_allocation()
5911 // guarantees there's no observer of the allocated array at this point
5912 // and the control flow is simple enough.
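// Schematically (illustrative), the graph is rewired from
//   allocation -> guards -> arraycopy
// to
//   guards -> allocation -> arraycopy
// so that a deopt taken in one of the guards re-executes the allocation in the
// interpreter instead of leaving an uninitialized array in the heap.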
5913 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5914 int saved_reexecute_sp, uint new_idx) {
5915 if (saved_jvms_before_guards != nullptr && !stopped()) {
5916 replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5917
5918 assert(alloc != nullptr, "only with a tightly coupled allocation");
5919 // restore JVM state to the state at the arraycopy
5920 saved_jvms_before_guards->map()->set_control(map()->control());
5921 assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5922 assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5923 // If we've improved the types of some nodes (null check) while
5924 // emitting the guards, propagate them to the current state
5925 map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5926 set_jvms(saved_jvms_before_guards);
5927 _reexecute_sp = saved_reexecute_sp;
5928
5929 // Remove the allocation from above the guards
5930 CallProjections* callprojs = alloc->extract_projections(true);
5931 InitializeNode* init = alloc->initialization();
5932 Node* alloc_mem = alloc->in(TypeFunc::Memory);
5933 C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5934 C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5935
5936 // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5937 // the allocation (i.e. is only valid if the allocation succeeds):
5938 // 1) replace CastIINode with AllocateArrayNode's length here
5939 // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5940 //
5941 // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
5942   // a new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy)
5943 Node* init_control = init->proj_out(TypeFunc::Control);
5944 Node* alloc_length = alloc->Ideal_length();
5945 #ifdef ASSERT
5946 Node* prev_cast = nullptr;
5947 #endif
5948 for (uint i = 0; i < init_control->outcnt(); i++) {
5949 Node* init_out = init_control->raw_out(i);
5950 if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5951 #ifdef ASSERT
5952 if (prev_cast == nullptr) {
5953 prev_cast = init_out;
5955 if (prev_cast->cmp(*init_out) == false) {
5956 prev_cast->dump();
5957 init_out->dump();
5958 assert(false, "not equal CastIINode");
5959 }
5960 }
5961 #endif
5962 C->gvn_replace_by(init_out, alloc_length);
5963 }
5964 }
5965 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5966
5967 // move the allocation here (after the guards)
5968 _gvn.hash_delete(alloc);
5969 alloc->set_req(TypeFunc::Control, control());
5970 alloc->set_req(TypeFunc::I_O, i_o());
5971 Node *mem = reset_memory();
5972 set_all_memory(mem);
5973 alloc->set_req(TypeFunc::Memory, mem);
5974 set_control(init->proj_out_or_null(TypeFunc::Control));
5975 set_i_o(callprojs->fallthrough_ioproj);
5976
5977 // Update memory as done in GraphKit::set_output_for_allocation()
5978 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5979 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5980 if (ary_type->isa_aryptr() && length_type != nullptr) {
5981 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5982 }
5983 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5984 int elemidx = C->get_alias_index(telemref);
5985 set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5986 set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5987
5988 Node* allocx = _gvn.transform(alloc);
5989 assert(allocx == alloc, "where has the allocation gone?");
5990 assert(dest->is_CheckCastPP(), "not an allocation result?");
5991
5992 _gvn.hash_delete(dest);
5993 dest->set_req(0, control());
5994 Node* destx = _gvn.transform(dest);
5995 assert(destx == dest, "where has the allocation result gone?");
6293 top_src = src_type->isa_aryptr();
6294 has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
6295 src_spec = true;
6296 }
6297 if (!has_dest) {
6298 dest = maybe_cast_profiled_obj(dest, dest_k, true);
6299 dest_type = _gvn.type(dest);
6300 top_dest = dest_type->isa_aryptr();
6301 has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
6302 dest_spec = true;
6303 }
6304 }
6305 }
6306
6307 if (has_src && has_dest && can_emit_guards) {
6308 BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
6309 BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
6310 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
6311 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
6312
6313 if (src_elem == dest_elem && top_src->is_flat() == top_dest->is_flat() && src_elem == T_OBJECT) {
6314 // If both arrays are object arrays then having the exact types
6315 // for both will remove the need for a subtype check at runtime
6316 // before the call and may make it possible to pick a faster copy
6317 // routine (without a subtype check on every element)
6318 // Do we have the exact type of src?
6319 bool could_have_src = src_spec;
6320 // Do we have the exact type of dest?
6321 bool could_have_dest = dest_spec;
6322 ciKlass* src_k = nullptr;
6323 ciKlass* dest_k = nullptr;
6324 if (!src_spec) {
6325 src_k = src_type->speculative_type_not_null();
6326 if (src_k != nullptr && src_k->is_array_klass()) {
6327 could_have_src = true;
6328 }
6329 }
6330 if (!dest_spec) {
6331 dest_k = dest_type->speculative_type_not_null();
6332 if (dest_k != nullptr && dest_k->is_array_klass()) {
6333 could_have_dest = true;
6334 }
6335 }
6336 if (could_have_src && could_have_dest) {
6337 // If we can have both exact types, emit the missing guards
6338 if (could_have_src && !src_spec) {
6339 src = maybe_cast_profiled_obj(src, src_k, true);
6340 src_type = _gvn.type(src);
6341 top_src = src_type->isa_aryptr();
6342 }
6343 if (could_have_dest && !dest_spec) {
6344 dest = maybe_cast_profiled_obj(dest, dest_k, true);
6345 dest_type = _gvn.type(dest);
6346 top_dest = dest_type->isa_aryptr();
6347 }
6348 }
6349 }
6350 }
6351
6352 ciMethod* trap_method = method();
6353 int trap_bci = bci();
6354 if (saved_jvms_before_guards != nullptr) {
6355 trap_method = alloc->jvms()->method();
6356 trap_bci = alloc->jvms()->bci();
6357 }
6358
6359 bool negative_length_guard_generated = false;
6360
6361 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
6362 can_emit_guards && !src->is_top() && !dest->is_top()) {
6363     // validate arguments: enables transformation of the ArrayCopyNode
6364 validated = true;
6365
6366 RegionNode* slow_region = new RegionNode(1);
6367 record_for_igvn(slow_region);
6368
6369 // (1) src and dest are arrays.
6370 generate_non_array_guard(load_object_klass(src), slow_region, &src);
6371 generate_non_array_guard(load_object_klass(dest), slow_region, &dest);
6372
6373 // (2) src and dest arrays must have elements of the same BasicType
6374 // done at macro expansion or at Ideal transformation time
6375
6376 // (4) src_offset must not be negative.
6377 generate_negative_guard(src_offset, slow_region);
6378
6379 // (5) dest_offset must not be negative.
6380 generate_negative_guard(dest_offset, slow_region);
6381
6382 // (7) src_offset + length must not exceed length of src.
6385 slow_region);
6386
6387 // (8) dest_offset + length must not exceed length of dest.
6388 generate_limit_guard(dest_offset, length,
6389 load_array_length(dest),
6390 slow_region);
6391
6392 // (6) length must not be negative.
6393 // This is also checked in generate_arraycopy() during macro expansion, but
6394 // we also have to check it here for the case where the ArrayCopyNode will
6395 // be eliminated by Escape Analysis.
6396 if (EliminateAllocations) {
6397 generate_negative_guard(length, slow_region);
6398 negative_length_guard_generated = true;
6399 }
6400
6401 // (9) each element of an oop array must be assignable
6402 Node* dest_klass = load_object_klass(dest);
6403 if (src != dest) {
6404 Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
6405 slow_region->add_req(not_subtype_ctrl);
6406 }
6407
6408 // TODO 8350865 Fix below logic. Also handle atomicity.
6409 generate_fair_guard(flat_array_test(src), slow_region);
6410 generate_fair_guard(flat_array_test(dest), slow_region);
6411
6412 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
6413 const Type* toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
6414 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
6415 src_type = _gvn.type(src);
6416 top_src = src_type->isa_aryptr();
6417
6418 // Handle flat inline type arrays (null-free arrays are handled by the subtype check above)
6419 if (!stopped() && UseArrayFlattening) {
6420 // If dest is flat, src must be flat as well (guaranteed by src <: dest check). Handle flat src here.
6421 assert(top_dest == nullptr || !top_dest->is_flat() || top_src->is_flat(), "src array must be flat");
6422 if (top_src != nullptr && top_src->is_flat()) {
6423 // Src is flat, check that dest is flat as well
6424 if (top_dest != nullptr && !top_dest->is_flat()) {
6425 generate_fair_guard(flat_array_test(dest_klass, /* flat = */ false), slow_region);
6426 // Since dest is flat and src <: dest, dest must have the same type as src.
6427 top_dest = top_src->cast_to_exactness(false);
6428 assert(top_dest->is_flat(), "dest must be flat");
6429 dest = _gvn.transform(new CheckCastPPNode(control(), dest, top_dest));
6430 }
6431 } else if (top_src == nullptr || !top_src->is_not_flat()) {
6432 // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
6433 // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
6434 assert(top_dest == nullptr || !top_dest->is_flat(), "dest array must not be flat");
6435 generate_fair_guard(flat_array_test(src), slow_region);
6436 if (top_src != nullptr) {
6437 top_src = top_src->cast_to_not_flat();
6438 src = _gvn.transform(new CheckCastPPNode(control(), src, top_src));
6439 }
6440 }
6441 }
6442
6443 {
6444 PreserveJVMState pjvms(this);
6445 set_control(_gvn.transform(slow_region));
6446 uncommon_trap(Deoptimization::Reason_intrinsic,
6447 Deoptimization::Action_make_not_entrant);
6448 assert(stopped(), "Should be stopped");
6449 }
6450 arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
6451 }
6452
6453 if (stopped()) {
6454 return true;
6455 }
6456
6457 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6458 // Create LoadRange and LoadKlass nodes for use during macro expansion here
6459 // so the compiler has a chance to eliminate them: during macro expansion,
6460 // we have to set their control (CastPP nodes are eliminated).
6461 load_object_klass(src), load_object_klass(dest),
6462 load_array_length(src), load_array_length(dest));
6463
6464 ac->set_arraycopy(validated);
6465
6466 Node* n = _gvn.transform(ac);
6467 if (n == ac) {
6468 ac->connect_outputs(this);
6469 } else {
|