src/hotspot/share/opto/library_call.cpp

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/macroAssembler.hpp"



  26 #include "ci/ciSymbols.hpp"
  27 #include "ci/ciUtilities.inline.hpp"
  28 #include "classfile/vmIntrinsics.hpp"
  29 #include "compiler/compileBroker.hpp"
  30 #include "compiler/compileLog.hpp"
  31 #include "gc/shared/barrierSet.hpp"

  32 #include "jfr/support/jfrIntrinsics.hpp"
  33 #include "memory/resourceArea.hpp"

  34 #include "oops/klass.inline.hpp"

  35 #include "oops/objArrayKlass.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/c2compiler.hpp"
  39 #include "opto/castnode.hpp"
  40 #include "opto/cfgnode.hpp"
  41 #include "opto/convertnode.hpp"
  42 #include "opto/countbitsnode.hpp"

  43 #include "opto/idealKit.hpp"

  44 #include "opto/library_call.hpp"
  45 #include "opto/mathexactnode.hpp"
  46 #include "opto/mulnode.hpp"
  47 #include "opto/narrowptrnode.hpp"
  48 #include "opto/opaquenode.hpp"

  49 #include "opto/parse.hpp"
  50 #include "opto/rootnode.hpp"
  51 #include "opto/runtime.hpp"
  52 #include "opto/subnode.hpp"

  53 #include "opto/vectornode.hpp"
  54 #include "prims/jvmtiExport.hpp"
  55 #include "prims/jvmtiThreadState.hpp"
  56 #include "prims/unsafe.hpp"

  57 #include "runtime/jniHandles.inline.hpp"
  58 #include "runtime/mountUnmountDisabler.hpp"
  59 #include "runtime/objectMonitor.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/stubRoutines.hpp"

  62 #include "utilities/macros.hpp"
  63 #include "utilities/powerOfTwo.hpp"
  64 
  65 //---------------------------make_vm_intrinsic----------------------------
  66 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  67   vmIntrinsicID id = m->intrinsic_id();
  68   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
  69 
  70   if (!m->is_loaded()) {
  71     // Do not attempt to inline unloaded methods.
  72     return nullptr;
  73   }
  74 
  75   C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
  76   bool is_available = false;
  77 
  78   {
  79     // For calling is_intrinsic_supported and is_intrinsic_disabled_by_flag
  80     // the compiler must transition to '_thread_in_vm' state because both
  81     // methods access VM-internal data.

 392   case vmIntrinsics::_getReferenceOpaque:       return inline_unsafe_access(!is_store, T_OBJECT,   Opaque, false);
 393   case vmIntrinsics::_getBooleanOpaque:         return inline_unsafe_access(!is_store, T_BOOLEAN,  Opaque, false);
 394   case vmIntrinsics::_getByteOpaque:            return inline_unsafe_access(!is_store, T_BYTE,     Opaque, false);
 395   case vmIntrinsics::_getShortOpaque:           return inline_unsafe_access(!is_store, T_SHORT,    Opaque, false);
 396   case vmIntrinsics::_getCharOpaque:            return inline_unsafe_access(!is_store, T_CHAR,     Opaque, false);
 397   case vmIntrinsics::_getIntOpaque:             return inline_unsafe_access(!is_store, T_INT,      Opaque, false);
 398   case vmIntrinsics::_getLongOpaque:            return inline_unsafe_access(!is_store, T_LONG,     Opaque, false);
 399   case vmIntrinsics::_getFloatOpaque:           return inline_unsafe_access(!is_store, T_FLOAT,    Opaque, false);
 400   case vmIntrinsics::_getDoubleOpaque:          return inline_unsafe_access(!is_store, T_DOUBLE,   Opaque, false);
 401 
 402   case vmIntrinsics::_putReferenceOpaque:       return inline_unsafe_access( is_store, T_OBJECT,   Opaque, false);
 403   case vmIntrinsics::_putBooleanOpaque:         return inline_unsafe_access( is_store, T_BOOLEAN,  Opaque, false);
 404   case vmIntrinsics::_putByteOpaque:            return inline_unsafe_access( is_store, T_BYTE,     Opaque, false);
 405   case vmIntrinsics::_putShortOpaque:           return inline_unsafe_access( is_store, T_SHORT,    Opaque, false);
 406   case vmIntrinsics::_putCharOpaque:            return inline_unsafe_access( is_store, T_CHAR,     Opaque, false);
 407   case vmIntrinsics::_putIntOpaque:             return inline_unsafe_access( is_store, T_INT,      Opaque, false);
 408   case vmIntrinsics::_putLongOpaque:            return inline_unsafe_access( is_store, T_LONG,     Opaque, false);
 409   case vmIntrinsics::_putFloatOpaque:           return inline_unsafe_access( is_store, T_FLOAT,    Opaque, false);
 410   case vmIntrinsics::_putDoubleOpaque:          return inline_unsafe_access( is_store, T_DOUBLE,   Opaque, false);
 411 



 412   case vmIntrinsics::_compareAndSetReference:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap,      Volatile);
 413   case vmIntrinsics::_compareAndSetByte:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap,      Volatile);
 414   case vmIntrinsics::_compareAndSetShort:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap,      Volatile);
 415   case vmIntrinsics::_compareAndSetInt:         return inline_unsafe_load_store(T_INT,    LS_cmp_swap,      Volatile);
 416   case vmIntrinsics::_compareAndSetLong:        return inline_unsafe_load_store(T_LONG,   LS_cmp_swap,      Volatile);
 417 
 418   case vmIntrinsics::_weakCompareAndSetReferencePlain:     return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Relaxed);
 419   case vmIntrinsics::_weakCompareAndSetReferenceAcquire:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Acquire);
 420   case vmIntrinsics::_weakCompareAndSetReferenceRelease:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Release);
 421   case vmIntrinsics::_weakCompareAndSetReference:          return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Volatile);
 422   case vmIntrinsics::_weakCompareAndSetBytePlain:          return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Relaxed);
 423   case vmIntrinsics::_weakCompareAndSetByteAcquire:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Acquire);
 424   case vmIntrinsics::_weakCompareAndSetByteRelease:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Release);
 425   case vmIntrinsics::_weakCompareAndSetByte:               return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Volatile);
 426   case vmIntrinsics::_weakCompareAndSetShortPlain:         return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Relaxed);
 427   case vmIntrinsics::_weakCompareAndSetShortAcquire:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Acquire);
 428   case vmIntrinsics::_weakCompareAndSetShortRelease:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Release);
 429   case vmIntrinsics::_weakCompareAndSetShort:              return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Volatile);
 430   case vmIntrinsics::_weakCompareAndSetIntPlain:           return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Relaxed);
 431   case vmIntrinsics::_weakCompareAndSetIntAcquire:         return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Acquire);

 451   case vmIntrinsics::_compareAndExchangeLong:              return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Volatile);
 452   case vmIntrinsics::_compareAndExchangeLongAcquire:       return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Acquire);
 453   case vmIntrinsics::_compareAndExchangeLongRelease:       return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Release);
 454 
 455   case vmIntrinsics::_getAndAddByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_add,       Volatile);
 456   case vmIntrinsics::_getAndAddShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_add,       Volatile);
 457   case vmIntrinsics::_getAndAddInt:                     return inline_unsafe_load_store(T_INT,    LS_get_add,       Volatile);
 458   case vmIntrinsics::_getAndAddLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_add,       Volatile);
 459 
 460   case vmIntrinsics::_getAndSetByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_set,       Volatile);
 461   case vmIntrinsics::_getAndSetShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_set,       Volatile);
 462   case vmIntrinsics::_getAndSetInt:                     return inline_unsafe_load_store(T_INT,    LS_get_set,       Volatile);
 463   case vmIntrinsics::_getAndSetLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_set,       Volatile);
 464   case vmIntrinsics::_getAndSetReference:               return inline_unsafe_load_store(T_OBJECT, LS_get_set,       Volatile);
 465 
 466   case vmIntrinsics::_loadFence:
 467   case vmIntrinsics::_storeFence:
 468   case vmIntrinsics::_storeStoreFence:
 469   case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
 470 





 471   case vmIntrinsics::_onSpinWait:               return inline_onspinwait();
 472 
 473   case vmIntrinsics::_currentCarrierThread:     return inline_native_currentCarrierThread();
 474   case vmIntrinsics::_currentThread:            return inline_native_currentThread();
 475   case vmIntrinsics::_setCurrentThread:         return inline_native_setCurrentThread();
 476 
 477   case vmIntrinsics::_scopedValueCache:          return inline_native_scopedValueCache();
 478   case vmIntrinsics::_setScopedValueCache:       return inline_native_setScopedValueCache();
 479 
 480   case vmIntrinsics::_Continuation_pin:          return inline_native_Continuation_pinning(false);
 481   case vmIntrinsics::_Continuation_unpin:        return inline_native_Continuation_pinning(true);
 482 
 483   case vmIntrinsics::_vthreadEndFirstTransition:    return inline_native_vthread_end_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_end_first_transition_Java()),
 484                                                                                                 "endFirstTransition", true);
 485   case vmIntrinsics::_vthreadStartFinalTransition:  return inline_native_vthread_start_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_start_final_transition_Java()),
 486                                                                                                   "startFinalTransition", true);
 487   case vmIntrinsics::_vthreadStartTransition:       return inline_native_vthread_start_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_start_transition_Java()),
 488                                                                                                   "startTransition", false);
 489   case vmIntrinsics::_vthreadEndTransition:         return inline_native_vthread_end_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_end_transition_Java()),
 490                                                                                                 "endTransition", false);

 499 #endif
 500   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 501   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 502   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 503   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 504   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 505   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 506   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 507   case vmIntrinsics::_setMemory:                return inline_unsafe_setMemory();
 508   case vmIntrinsics::_getLength:                return inline_native_getLength();
 509   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 510   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 511   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 512   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 513   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 514   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 515   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 516 
 517   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 518   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);






 519 
 520   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 521 
 522   case vmIntrinsics::_isInstance:
 523   case vmIntrinsics::_isHidden:
 524   case vmIntrinsics::_getSuperclass:            return inline_native_Class_query(intrinsic_id());
 525 
 526   case vmIntrinsics::_floatToRawIntBits:
 527   case vmIntrinsics::_floatToIntBits:
 528   case vmIntrinsics::_intBitsToFloat:
 529   case vmIntrinsics::_doubleToRawLongBits:
 530   case vmIntrinsics::_doubleToLongBits:
 531   case vmIntrinsics::_longBitsToDouble:
 532   case vmIntrinsics::_floatToFloat16:
 533   case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());
 534   case vmIntrinsics::_sqrt_float16:             return inline_fp16_operations(intrinsic_id(), 1);
 535   case vmIntrinsics::_fma_float16:              return inline_fp16_operations(intrinsic_id(), 3);
 536   case vmIntrinsics::_floatIsFinite:
 537   case vmIntrinsics::_floatIsInfinite:
 538   case vmIntrinsics::_doubleIsFinite:

2263     case vmIntrinsics::_remainderUnsigned_l: {
2264       zero_check_long(argument(2));
 2265       // Compile-time detection of a zero divisor (exception path)
2266       if (stopped()) {
2267         return true; // keep the graph constructed so far
2268       }
2269       n = new UModLNode(control(), argument(0), argument(2));
2270       break;
2271     }
2272     default:  fatal_unexpected_iid(id);  break;
2273   }
2274   set_result(_gvn.transform(n));
2275   return true;
2276 }
2277 
2278 //----------------------------inline_unsafe_access----------------------------
2279 
2280 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2281   // Attempt to infer a sharper value type from the offset and base type.
2282   ciKlass* sharpened_klass = nullptr;

2283 
2284   // See if it is an instance field, with an object type.
2285   if (alias_type->field() != nullptr) {
2286     if (alias_type->field()->type()->is_klass()) {
2287       sharpened_klass = alias_type->field()->type()->as_klass();

2288     }
2289   }
2290 
2291   const TypeOopPtr* result = nullptr;
2292   // See if it is a narrow oop array.
2293   if (adr_type->isa_aryptr()) {
2294     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2295       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();

2296       if (elem_type != nullptr && elem_type->is_loaded()) {
2297         // Sharpen the value type.
2298         result = elem_type;
2299       }
2300     }
2301   }
2302 
2303   // The sharpened class might be unloaded if there is no class loader
 2304   // constraint in place.
2305   if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2306     // Sharpen the value type.
2307     result = TypeOopPtr::make_from_klass(sharpened_klass);



2308   }
2309   if (result != nullptr) {
2310 #ifndef PRODUCT
2311     if (C->print_intrinsics() || C->print_inlining()) {
2312       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2313       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2314     }
2315 #endif
2316   }
2317   return result;
2318 }
2319 
2320 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2321   switch (kind) {
2322       case Relaxed:
2323         return MO_UNORDERED;
2324       case Opaque:
2325         return MO_RELAXED;
2326       case Acquire:
2327         return MO_ACQUIRE;

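// For illustration, the decorator mapping above corresponds roughly to C++11
// memory orders. A minimal standalone sketch, not HotSpot code: the enum is a
// local stand-in for LibraryCallKit::AccessKind, and the Release/Volatile rows
// (elided in this view) are inferred from the MO_RELEASE/MO_SEQ_CST decorators.

#include <atomic>

enum class AccessKindSketch { Relaxed, Opaque, Acquire, Release, Volatile };

std::memory_order order_for(AccessKindSketch k) {
  switch (k) {
    case AccessKindSketch::Relaxed:  return std::memory_order_relaxed; // plain access (~MO_UNORDERED)
    case AccessKindSketch::Opaque:   return std::memory_order_relaxed; // ~MO_RELAXED
    case AccessKindSketch::Acquire:  return std::memory_order_acquire; // ~MO_ACQUIRE
    case AccessKindSketch::Release:  return std::memory_order_release; // ~MO_RELEASE
    case AccessKindSketch::Volatile: return std::memory_order_seq_cst; // ~MO_SEQ_CST
  }
  return std::memory_order_seq_cst;
}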
2375 #endif // ASSERT
2376  }
2377 #endif //PRODUCT
2378 
2379   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2380 
2381   Node* receiver = argument(0);  // type: oop
2382 
2383   // Build address expression.
2384   Node* heap_base_oop = top();
2385 
2386   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2387   Node* base = argument(1);  // type: oop
2388   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2389   Node* offset = argument(2);  // type: long
2390   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2391   // to be plain byte offsets, which are also the same as those accepted
2392   // by oopDesc::field_addr.
2393   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2394          "fieldOffset must be byte-scaled");
2395   // 32-bit machines ignore the high half!
2396   offset = ConvL2X(offset);
2397 
2398   // Save state and restore on bailout
2399   SavedState old_state(this);
2400 
2401   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2402   assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2403 
2404   if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2405     if (type != T_OBJECT) {
2406       decorators |= IN_NATIVE; // off-heap primitive access
2407     } else {
2408       return false; // off-heap oop accesses are not supported
2409     }
2410   } else {
2411     heap_base_oop = base; // on-heap or mixed access
2412   }
2413 
2414   // Can base be null? Otherwise, always on-heap access.

2418     decorators |= IN_HEAP;
2419   }
2420 
2421   Node* val = is_store ? argument(4) : nullptr;
2422 
2423   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2424   if (adr_type == TypePtr::NULL_PTR) {
2425     return false; // off-heap access with zero address
2426   }
2427 
2428   // Try to categorize the address.
2429   Compile::AliasType* alias_type = C->alias_type(adr_type);
2430   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2431 
2432   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2433       alias_type->adr_type() == TypeAryPtr::RANGE) {
2434     return false; // not supported
2435   }
2436 
2437   bool mismatched = false;
2438   BasicType bt = alias_type->basic_type();
2439   if (bt != T_ILLEGAL) {
2440     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2441     if (bt == T_BYTE && adr_type->isa_aryptr()) {
 2442       // Alias type doesn't differentiate between byte[] and boolean[].
2443       // Use address type to get the element type.
2444       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2445     }
2446     if (is_reference_type(bt, true)) {
2447       // accessing an array field with getReference is not a mismatch
2448       bt = T_OBJECT;
2449     }
2450     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2451       // Don't intrinsify mismatched object accesses
2452       return false;
2453     }
2454     mismatched = (bt != type);
2455   } else if (alias_type->adr_type()->isa_oopptr()) {
2456     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2457   }
2458 
2459   old_state.discard();
2460   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2461 
2462   if (mismatched) {
2463     decorators |= C2_MISMATCHED;
2464   }
2465 
2466   // First guess at the value type.
2467   const Type *value_type = Type::get_const_basic_type(type);
2468 
2469   // Figure out the memory ordering.
2470   decorators |= mo_decorator_for_access_kind(kind);
2471 
2472   if (!is_store && type == T_OBJECT) {
2473     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2474     if (tjp != nullptr) {
2475       value_type = tjp;


2476     }
2477   }
2478 
2479   receiver = null_check(receiver);
2480   if (stopped()) {
2481     return true;
2482   }
2483   // Heap pointers get a null-check from the interpreter,
2484   // as a courtesy.  However, this is not guaranteed by Unsafe,
2485   // and it is not possible to fully distinguish unintended nulls
2486   // from intended ones in this API.
2487 
2488   if (!is_store) {
2489     Node* p = nullptr;
2490     // Try to constant fold a load from a constant field
2491     ciField* field = alias_type->field();
2492     if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) {
2493       // final or stable field
2494       p = make_constant_from_field(field, heap_base_oop);
2495     }
2496 
2497     if (p == nullptr) { // Could not constant fold the load
2498       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);





2499       // Normalize the value returned by getBoolean in the following cases
2500       if (type == T_BOOLEAN &&
2501           (mismatched ||
2502            heap_base_oop == top() ||                  // - heap_base_oop is null or
2503            (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2504                                                       //   and the unsafe access is made to large offset
2505                                                       //   (i.e., larger than the maximum offset necessary for any
2506                                                       //   field access)
2507             ) {
2508           IdealKit ideal = IdealKit(this);
2509 #define __ ideal.
2510           IdealVariable normalized_result(ideal);
2511           __ declarations_done();
2512           __ set(normalized_result, p);
2513           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2514           __ set(normalized_result, ideal.ConI(1));
2515           ideal.end_if();
2516           final_sync(ideal);
2517           p = __ value(normalized_result);
2518 #undef __

2522       p = gvn().transform(new CastP2XNode(nullptr, p));
2523       p = ConvX2UL(p);
2524     }
2525     // The load node has the control of the preceding MemBarCPUOrder.  All
2526     // following nodes will have the control of the MemBarCPUOrder inserted at
2527     // the end of this method.  So, pushing the load onto the stack at a later
2528     // point is fine.
2529     set_result(p);
2530   } else {
2531     if (bt == T_ADDRESS) {
2532       // Repackage the long as a pointer.
2533       val = ConvL2X(val);
2534       val = gvn().transform(new CastX2PNode(val));
2535     }
2536     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2537   }
2538 
2539   return true;
2540 }
2541 
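// For illustration, the IdealKit fragment above normalizes the result of a
// mismatched or off-heap getBoolean so that any non-zero byte becomes 1. A
// minimal standalone sketch of that computation (hypothetical helper, not
// HotSpot code):

#include <cstdint>

int32_t normalize_unsafe_boolean(int8_t raw) {
  // Java boolean values must be canonical 0/1; raw unsafe loads may not be.
  return raw != 0 ? 1 : 0;
}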
2542 //----------------------------inline_unsafe_load_store----------------------------
2543 // This method serves a couple of different customers (depending on LoadStoreKind):
2544 //
2545 // LS_cmp_swap:
2546 //
2547 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2548 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2549 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2550 //
2551 // LS_cmp_swap_weak:
2552 //
2553 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2554 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2555 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2556 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2557 //
2558 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);
2559 //   boolean weakCompareAndSetIntPlain(     Object o, long offset, int    expected, int    x);
2560 //   boolean weakCompareAndSetIntAcquire(   Object o, long offset, int    expected, int    x);
2561 //   boolean weakCompareAndSetIntRelease(   Object o, long offset, int    expected, int    x);

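// For illustration, the signatures above differ along two axes: strong vs weak
// CAS (the weak form may fail spuriously) and memory ordering (Plain, Acquire,
// Release, or the default volatile/seq-cst form). A standalone std::atomic
// analogy, not HotSpot code and not the intrinsic implementation:

#include <atomic>

// compareAndSetInt: strong CAS with sequentially consistent ordering.
bool cas_int_sketch(std::atomic<int>& a, int expected, int x) {
  return a.compare_exchange_strong(expected, x, std::memory_order_seq_cst);
}

// weakCompareAndSetIntAcquire: weak CAS (may fail spuriously), acquire ordering.
bool weak_cas_int_acquire_sketch(std::atomic<int>& a, int expected, int x) {
  return a.compare_exchange_weak(expected, x,
                                 std::memory_order_acquire,
                                 std::memory_order_acquire);
}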
2724     }
2725     case LS_cmp_swap:
2726     case LS_cmp_swap_weak:
2727     case LS_get_add:
2728       break;
2729     default:
2730       ShouldNotReachHere();
2731   }
2732 
2733   // Null check receiver.
2734   receiver = null_check(receiver);
2735   if (stopped()) {
2736     return true;
2737   }
2738 
2739   int alias_idx = C->get_alias_index(adr_type);
2740 
2741   if (is_reference_type(type)) {
2742     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2743 
2744     // Transformation of a value which could be null pointer (CastPP #null)
2745     // could be delayed during Parse (for example, in adjust_map_after_if()).
2746     // Execute transformation here to avoid barrier generation in such case.
2747     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2748       newval = _gvn.makecon(TypePtr::NULL_PTR);
2749 
2750     if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2751       // Refine the value to a null constant, when it is known to be null
2752       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2753     }
2754   }
2755 
2756   Node* result = nullptr;
2757   switch (kind) {
2758     case LS_cmp_exchange: {
2759       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2760                                             oldval, newval, value_type, type, decorators);
2761       break;
2762     }
2763     case LS_cmp_swap_weak:

2792   insert_mem_bar(Op_MemBarCPUOrder);
2793   switch(id) {
2794     case vmIntrinsics::_loadFence:
2795       insert_mem_bar(Op_LoadFence);
2796       return true;
2797     case vmIntrinsics::_storeFence:
2798       insert_mem_bar(Op_StoreFence);
2799       return true;
2800     case vmIntrinsics::_storeStoreFence:
2801       insert_mem_bar(Op_StoreStoreFence);
2802       return true;
2803     case vmIntrinsics::_fullFence:
2804       insert_mem_bar(Op_MemBarFull);
2805       return true;
2806     default:
2807       fatal_unexpected_iid(id);
2808       return false;
2809   }
2810 }
2811 
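// For illustration, the four Unsafe fences handled above map naturally onto
// C++ atomic thread fences; storeStoreFence has no exact std:: equivalent, so
// a release fence is used as a conservative stand-in. Standalone sketch only,
// not the barrier nodes the intrinsic actually emits:

#include <atomic>

void load_fence_sketch()        { std::atomic_thread_fence(std::memory_order_acquire); } // Op_LoadFence
void store_fence_sketch()       { std::atomic_thread_fence(std::memory_order_release); } // Op_StoreFence
void store_store_fence_sketch() { std::atomic_thread_fence(std::memory_order_release); } // conservative for Op_StoreStoreFence
void full_fence_sketch()        { std::atomic_thread_fence(std::memory_order_seq_cst); } // Op_MemBarFull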
2812 bool LibraryCallKit::inline_onspinwait() {
2813   insert_mem_bar(Op_OnSpinWait);
2814   return true;
2815 }
2816 
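// For illustration, Op_OnSpinWait typically lowers to a CPU pause hint inside
// a spin loop. A standalone sketch assuming an x86 target and <immintrin.h>
// (not HotSpot code):

#include <atomic>
#include <immintrin.h>  // _mm_pause(); x86-specific assumption

void spin_until_true_sketch(const std::atomic<bool>& flag) {
  while (!flag.load(std::memory_order_acquire)) {
    _mm_pause();  // the kind of hint Thread.onSpinWait() is intended to emit
  }
}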
2817 bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
2818   if (!kls->is_Con()) {
2819     return true;
2820   }
2821   const TypeInstKlassPtr* klsptr = kls->bottom_type()->isa_instklassptr();
2822   if (klsptr == nullptr) {
2823     return true;
2824   }
2825   ciInstanceKlass* ik = klsptr->instance_klass();
2826   // don't need a guard for a klass that is already initialized
2827   return !ik->is_initialized();
2828 }
2829 
2830 //----------------------------inline_unsafe_writeback0-------------------------
2831 // public native void Unsafe.writeback0(long address)

2910                     Deoptimization::Action_make_not_entrant);
2911     }
2912     if (stopped()) {
2913       return true;
2914     }
2915 #endif //INCLUDE_JVMTI
2916 
2917   Node* test = nullptr;
2918   if (LibraryCallKit::klass_needs_init_guard(kls)) {
2919     // Note:  The argument might still be an illegal value like
2920     // Serializable.class or Object[].class.   The runtime will handle it.
2921     // But we must make an explicit check for initialization.
2922     Node* insp = off_heap_plus_addr(kls, in_bytes(InstanceKlass::init_state_offset()));
2923     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2924     // can generate code to load it as unsigned byte.
2925     Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
2926     Node* bits = intcon(InstanceKlass::fully_initialized);
2927     test = _gvn.transform(new SubINode(inst, bits));
2928     // The 'test' is non-zero if we need to take a slow path.
2929   }
2930 
2931   Node* obj = new_instance(kls, test);
2932   set_result(obj);
2933   return true;
2934 }
2935 
2936 //------------------------inline_native_time_funcs--------------
2937 // inline code for System.currentTimeMillis() and System.nanoTime()
2938 // these have the same type and signature
2939 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2940   const TypeFunc* tf = OptoRuntime::void_long_Type();
2941   const TypePtr* no_memory_effects = nullptr;
2942   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2943   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2944 #ifdef ASSERT
2945   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2946   assert(value_top == top(), "second value must be top");
2947 #endif
2948   set_result(value);
2949   return true;
2950 }
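// For illustration, the two leaf calls wrapped above return a wall-clock
// millisecond value and a monotonic nanosecond value. A standalone std::chrono
// analogy (not the os::javaTimeMillis/os::javaTimeNanos implementations):

#include <chrono>
#include <cstdint>

int64_t current_time_millis_sketch() {  // wall clock, like System.currentTimeMillis()
  using namespace std::chrono;
  return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
}

int64_t nano_time_sketch() {            // monotonic, like System.nanoTime()
  using namespace std::chrono;
  return duration_cast<nanoseconds>(steady_clock::now().time_since_epoch()).count();
}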

3726   Node* arr = argument(1);
3727   Node* thread = _gvn.transform(new ThreadLocalNode());
3728   Node* p = off_heap_plus_addr(thread, in_bytes(JavaThread::vthread_offset()));
3729   Node* thread_obj_handle
3730     = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3731   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3732   access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3733 
3734   // Change the _monitor_owner_id of the JavaThread
3735   Node* tid = load_field_from_object(arr, "tid", "J");
3736   Node* monitor_owner_id_offset = off_heap_plus_addr(thread, in_bytes(JavaThread::monitor_owner_id_offset()));
3737   store_to_memory(control(), monitor_owner_id_offset, tid, T_LONG, MemNode::unordered, true);
3738 
3739   JFR_ONLY(extend_setCurrentThread(thread, arr);)
3740   return true;
3741 }
3742 
3743 const Type* LibraryCallKit::scopedValueCache_type() {
3744   ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3745   const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3746   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3747 
3748   // Because we create the scopedValue cache lazily we have to make the
3749   // type of the result BotPTR.
3750   bool xk = etype->klass_is_exact();
3751   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3752   return objects_type;
3753 }
3754 
3755 Node* LibraryCallKit::scopedValueCache_helper() {
3756   Node* thread = _gvn.transform(new ThreadLocalNode());
3757   Node* p = off_heap_plus_addr(thread, in_bytes(JavaThread::scopedValueCache_offset()));
3758   // We cannot use immutable_memory() because we might flip onto a
3759   // different carrier thread, at which point we'll need to use that
3760   // carrier thread's cache.
3761   // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3762   //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3763   return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3764 }
3765 
3766 //------------------------inline_native_scopedValueCache------------------
3767 bool LibraryCallKit::inline_native_scopedValueCache() {
3768   Node* cache_obj_handle = scopedValueCache_helper();
3769   const Type* objects_type = scopedValueCache_type();
3770   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3771 

3907   }
3908   return kls;
3909 }
3910 
3911 //--------------------(inline_native_Class_query helpers)---------------------
3912 // Use this for JVM_ACC_INTERFACE.
3913 // Fall through if (mods & mask) == bits, take the guard otherwise.
3914 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
3915                                                  ByteSize offset, const Type* type, BasicType bt) {
3916   // Branch around if the given klass has the given modifier bit set.
3917   // Like generate_guard, adds a new path onto the region.
3918   Node* modp = off_heap_plus_addr(kls, in_bytes(offset));
3919   Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
3920   Node* mask = intcon(modifier_mask);
3921   Node* bits = intcon(modifier_bits);
3922   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3923   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3924   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3925   return generate_fair_guard(bol, region);
3926 }
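// For illustration, the guard above is taken exactly when (mods & mask) != bits;
// the fall-through path is the "flags match" case. A standalone sketch of the
// predicate (hypothetical helper, not HotSpot code). In the JVM_ACC_INTERFACE
// use in generate_interface_guard below, mask is the interface bit and bits is
// 0, so the guard fires precisely for interfaces:

#include <cstdint>

bool flags_guard_taken_sketch(uint32_t mods, uint32_t mask, uint32_t bits) {
  return (mods & mask) != bits;  // true => take the guard (leave the fast path)
}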

3927 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3928   return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
3929                                     InstanceKlass::access_flags_offset(), TypeInt::CHAR, T_CHAR);
3930 }
3931 
3932 // Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
3933 Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3934   return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
3935                                     Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
3936 }
3937 
3938 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3939   return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
3940 }
3941 
3942 //-------------------------inline_native_Class_query-------------------
3943 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3944   const Type* return_type = TypeInt::BOOL;
3945   Node* prim_return_value = top();  // what happens if it's a primitive class?
3946   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);

4032 
4033 
4034   case vmIntrinsics::_getSuperclass:
4035     // The rules here are somewhat unfortunate, but we can still do better
4036     // with random logic than with a JNI call.
4037     // Interfaces store null or Object as _super, but must report null.
4038     // Arrays store an intermediate super as _super, but must report Object.
4039     // Other types can report the actual _super.
4040     // (To verify this code sequence, check the asserts in JVM_IsInterface.)
4041     if (generate_array_guard(kls, region) != nullptr) {
4042       // A guard was added.  If the guard is taken, it was an array.
4043       phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
4044     }
4045     // Check for interface after array since this checks AccessFlags offset into InstanceKlass.
4046     // In other words, we are accessing subtype-specific information, so we need to determine the subtype first.
4047     if (generate_interface_guard(kls, region) != nullptr) {
4048       // A guard was added.  If the guard is taken, it was an interface.
4049       phi->add_req(null());
4050     }
4051     // If we fall through, it's a plain class.  Get its _super.
4052     p = off_heap_plus_addr(kls, in_bytes(Klass::super_offset()));
4053     kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
4054     null_ctl = top();
4055     kls = null_check_oop(kls, &null_ctl);
4056     if (null_ctl != top()) {
4057       // If the guard is taken, Object.superClass is null (both klass and mirror).
4058       region->add_req(null_ctl);
4059       phi   ->add_req(null());
4060     }
4061     if (!stopped()) {
4062       query_value = load_mirror_from_klass(kls);
4063     }
4064     break;
4065 
4066   default:
4067     fatal_unexpected_iid(id);
4068     break;
4069   }
4070 
4071   // Fall-through is the normal case of a query to a real class.
4072   phi->init_req(1, query_value);
4073   region->init_req(1, control());
4074 
4075   C->set_has_split_ifs(true); // Has chance for split-if optimization
4076   set_result(region, phi);
4077   return true;
4078 }
4079 
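// For illustration, the getSuperclass case above implements these reporting
// rules: primitive mirrors and interfaces report null, arrays report Object,
// and plain classes report their stored _super. A standalone sketch with
// illustrative types (not HotSpot's Klass):

struct KlassSketch {
  bool is_array;
  bool is_interface;
  const KlassSketch* super;   // what the klass actually stores
};

const KlassSketch object_klass_sketch{false, false, nullptr};

const KlassSketch* get_superclass_sketch(const KlassSketch* k) {
  if (k == nullptr)     return nullptr;               // primitive mirror: no klass
  if (k->is_array)      return &object_klass_sketch;  // arrays must report Object
  if (k->is_interface)  return nullptr;               // interfaces must report null
  return k->super;                                    // plain classes report _super
}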

4080 //-------------------------inline_Class_cast-------------------
4081 bool LibraryCallKit::inline_Class_cast() {
4082   Node* mirror = argument(0); // Class
4083   Node* obj    = argument(1);
4084   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4085   if (mirror_con == nullptr) {
4086     return false;  // dead path (mirror->is_top()).
4087   }
4088   if (obj == nullptr || obj->is_top()) {
4089     return false;  // dead path
4090   }
4091   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4092 
4093   // First, see if Class.cast() can be folded statically.
4094   // java_mirror_type() returns non-null for compile-time Class constants.
4095   ciType* tm = mirror_con->java_mirror_type();
4096   if (tm != nullptr && tm->is_klass() &&
4097       tp != nullptr) {
4098     if (!tp->is_loaded()) {
4099       // Don't use intrinsic when class is not loaded.
4100       return false;
4101     } else {
4102       int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());

4103       if (static_res == Compile::SSC_always_true) {
4104         // isInstance() is true - fold the code.
4105         set_result(obj);
4106         return true;
4107       } else if (static_res == Compile::SSC_always_false) {
4108         // Don't use intrinsic, have to throw ClassCastException.
4109         // If the reference is null, the non-intrinsic bytecode will
4110         // be optimized appropriately.
4111         return false;
4112       }
4113     }
4114   }
4115 
4116   // Bailout intrinsic and do normal inlining if exception path is frequent.
4117   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4118     return false;
4119   }
4120 
4121   // Generate dynamic checks.
4122   // Class.cast() is java implementation of _checkcast bytecode.
4123   // Do checkcast (Parse::do_checkcast()) optimizations here.
4124 
4125   mirror = null_check(mirror);
4126   // If mirror is dead, only null-path is taken.
4127   if (stopped()) {
4128     return true;
4129   }
4130 
4131   // Not-subtype or the mirror's klass ptr is null (in case it is a primitive).
4132   enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
4133   RegionNode* region = new RegionNode(PATH_LIMIT);
4134   record_for_igvn(region);
4135 
4136   // Now load the mirror's klass metaobject, and null-check it.
4137   // If kls is null, we have a primitive mirror and
4138   // nothing is an instance of a primitive type.
4139   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4140 
4141   Node* res = top();




4142   if (!stopped()) {

4143     Node* bad_type_ctrl = top();
4144     // Do checkcast optimizations.
4145     res = gen_checkcast(obj, kls, &bad_type_ctrl);
4146     region->init_req(_bad_type_path, bad_type_ctrl);
4147   }
4148   if (region->in(_prim_path) != top() ||
4149       region->in(_bad_type_path) != top()) {

4150     // Let Interpreter throw ClassCastException.
4151     PreserveJVMState pjvms(this);




4152     set_control(_gvn.transform(region));



4153     uncommon_trap(Deoptimization::Reason_intrinsic,
4154                   Deoptimization::Action_maybe_recompile);
4155   }
4156   if (!stopped()) {
4157     set_result(res);
4158   }
4159   return true;
4160 }
4161 
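// For illustration, the static folding above is a three-way decision on the
// compile-time subtype relation: always-true folds the cast to the object,
// always-false gives up on the intrinsic (the normal path throws the
// ClassCastException), and unknown emits a dynamic checkcast. Standalone
// sketch with illustrative enums, not HotSpot code:

enum class StaticSubtypeResult { AlwaysTrue, AlwaysFalse, Unknown };
enum class ClassCastPlan { FoldToObject, UseNonIntrinsicPath, EmitDynamicCheck };

ClassCastPlan plan_class_cast_sketch(StaticSubtypeResult r) {
  switch (r) {
    case StaticSubtypeResult::AlwaysTrue:  return ClassCastPlan::FoldToObject;
    case StaticSubtypeResult::AlwaysFalse: return ClassCastPlan::UseNonIntrinsicPath;
    case StaticSubtypeResult::Unknown:     return ClassCastPlan::EmitDynamicCheck;
  }
  return ClassCastPlan::EmitDynamicCheck;
}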
4162 
4163 //--------------------------inline_native_subtype_check------------------------
4164 // This intrinsic takes the JNI calls out of the heart of
4165 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4166 bool LibraryCallKit::inline_native_subtype_check() {
4167   // Pull both arguments off the stack.
4168   Node* args[2];                // two java.lang.Class mirrors: superc, subc
4169   args[0] = argument(0);
4170   args[1] = argument(1);
4171   Node* klasses[2];             // corresponding Klasses: superk, subk
4172   klasses[0] = klasses[1] = top();
4173 
4174   enum {
4175     // A full decision tree on {superc is prim, subc is prim}:
4176     _prim_0_path = 1,           // {P,N} => false
4177                                 // {P,P} & superc!=subc => false
4178     _prim_same_path,            // {P,P} & superc==subc => true
4179     _prim_1_path,               // {N,P} => false
4180     _ref_subtype_path,          // {N,N} & subtype check wins => true
4181     _both_ref_path,             // {N,N} & subtype check loses => false
4182     PATH_LIMIT
4183   };
4184 
4185   RegionNode* region = new RegionNode(PATH_LIMIT);

4186   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
4187   record_for_igvn(region);

4188 
4189   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
4190   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4191   int class_klass_offset = java_lang_Class::klass_offset();
4192 
4193   // First null-check both mirrors and load each mirror's klass metaobject.
4194   int which_arg;
4195   for (which_arg = 0; which_arg <= 1; which_arg++) {
4196     Node* arg = args[which_arg];
4197     arg = null_check(arg);
4198     if (stopped())  break;
4199     args[which_arg] = arg;
4200 
4201     Node* p = basic_plus_adr(arg, class_klass_offset);
4202     Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
4203     klasses[which_arg] = _gvn.transform(kls);
4204   }
4205 
4206   // Having loaded both klasses, test each for null.
4207   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4208   for (which_arg = 0; which_arg <= 1; which_arg++) {
4209     Node* kls = klasses[which_arg];
4210     Node* null_ctl = top();
4211     kls = null_check_oop(kls, &null_ctl, never_see_null);
4212     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
4213     region->init_req(prim_path, null_ctl);



4214     if (stopped())  break;
4215     klasses[which_arg] = kls;
4216   }
4217 
4218   if (!stopped()) {
4219     // now we have two reference types, in klasses[0..1]
4220     Node* subk   = klasses[1];  // the argument to isAssignableFrom
4221     Node* superk = klasses[0];  // the receiver
4222     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4223     // now we have a successful reference subtype check
4224     region->set_req(_ref_subtype_path, control());
4225   }
4226 
4227   // If both operands are primitive (both klasses null), then
4228   // we must return true when they are identical primitives.
4229   // It is convenient to test this after the first null klass check.
4230   set_control(region->in(_prim_0_path)); // go back to first null check

4231   if (!stopped()) {
4232     // Since superc is primitive, make a guard for the superc==subc case.
4233     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4234     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4235     generate_guard(bol_eq, region, PROB_FAIR);
4236     if (region->req() == PATH_LIMIT+1) {
4237       // A guard was added.  If the added guard is taken, superc==subc.
4238       region->swap_edges(PATH_LIMIT, _prim_same_path);
4239       region->del_req(PATH_LIMIT);
4240     }
4241     region->set_req(_prim_0_path, control()); // Not equal after all.
4242   }
4243 
4244   // these are the only paths that produce 'true':
4245   phi->set_req(_prim_same_path,   intcon(1));
4246   phi->set_req(_ref_subtype_path, intcon(1));
4247 
4248   // pull together the cases:
4249   assert(region->req() == PATH_LIMIT, "sane region");
4250   for (uint i = 1; i < region->req(); i++) {
4251     Node* ctl = region->in(i);
4252     if (ctl == nullptr || ctl == top()) {
4253       region->set_req(i, top());
4254       phi   ->set_req(i, top());
4255     } else if (phi->in(i) == nullptr) {
4256       phi->set_req(i, intcon(0)); // all other paths produce 'false'
4257     }
4258   }
4259 
4260   set_control(_gvn.transform(region));
4261   set_result(_gvn.transform(phi));
4262   return true;
4263 }
4264 
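// For illustration, the path enum above encodes this decision tree for
// Class.isAssignableFrom: any primitive operand forces the answer without a
// subtype check (true only when both are the same primitive), otherwise the
// result is an ordinary reference subtype check. Standalone sketch with
// illustrative types; is_subtype_sketch stands in for the generated check:

struct MirrorSketch {
  bool is_primitive;
  const void* identity;   // used only for the primitive == primitive case
};

bool is_subtype_sketch(const MirrorSketch& sub, const MirrorSketch& super);

bool is_assignable_from_sketch(const MirrorSketch& superc, const MirrorSketch& subc) {
  if (superc.is_primitive || subc.is_primitive) {
    // {P,N} and {N,P} => false; {P,P} => true only for identical primitives.
    return superc.is_primitive && subc.is_primitive && superc.identity == subc.identity;
  }
  return is_subtype_sketch(subc, superc);   // {N,N}: reference subtype check
}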
4265 //---------------------generate_array_guard_common------------------------
4266 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
4267                                                   bool obj_array, bool not_array, Node** obj) {
4268 
4269   if (stopped()) {
4270     return nullptr;
4271   }
4272 
 4273   // If obj_array/not_array==false/false:
 4274   // Branch around if the given klass is in fact an array (either obj or prim).
 4275   // If obj_array/not_array==false/true:
 4276   // Branch around if the given klass is not an array klass of any kind.
 4277   // If obj_array/not_array==true/true:
 4278   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
 4279   // If obj_array/not_array==true/false:
 4280   // Branch around if the kls is an oop array (Object[] or subtype)
4281   //
4282   // Like generate_guard, adds a new path onto the region.
4283   jint  layout_con = 0;
4284   Node* layout_val = get_layout_helper(kls, layout_con);
4285   if (layout_val == nullptr) {
4286     bool query = (obj_array
4287                   ? Klass::layout_helper_is_objArray(layout_con)
4288                   : Klass::layout_helper_is_array(layout_con));
4289     if (query == not_array) {
4290       return nullptr;                       // never a branch
4291     } else {                             // always a branch
4292       Node* always_branch = control();
4293       if (region != nullptr)
4294         region->add_req(always_branch);
4295       set_control(top());
4296       return always_branch;
4297     }
4298   }
4299   // Now test the correct condition.
4300   jint  nval = (obj_array
4301                 ? (jint)(Klass::_lh_array_tag_type_value
4302                    <<    Klass::_lh_array_tag_shift)
4303                 : Klass::_lh_neutral_value);
4304   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4305   BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
4306   // invert the test if we are looking for a non-array
4307   if (not_array)  btest = BoolTest(btest).negate();
4308   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4309   Node* ctrl = generate_fair_guard(bol, region);
4310   Node* is_array_ctrl = not_array ? control() : ctrl;
4311   if (obj != nullptr && is_array_ctrl != nullptr && is_array_ctrl != top()) {
4312     // Keep track of the fact that 'obj' is an array to prevent
4313     // array specific accesses from floating above the guard.
4314     *obj = _gvn.transform(new CheckCastPPNode(is_array_ctrl, *obj, TypeAryPtr::BOTTOM));
4315   }
4316   return ctrl;
4317 }
4318 
4319 
4320 //-----------------------inline_native_newArray--------------------------
 4321 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4322 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4323 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4324   Node* mirror;
4325   Node* count_val;
4326   if (uninitialized) {
4327     null_check_receiver();
4328     mirror    = argument(1);
4329     count_val = argument(2);
4330   } else {
4331     mirror    = argument(0);
4332     count_val = argument(1);
4333   }
4334 
4335   mirror = null_check(mirror);
4336   // If mirror or obj is dead, only null-path is taken.
4337   if (stopped())  return true;
4338 
4339   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4340   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4341   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);

4359     CallJavaNode* slow_call = nullptr;
4360     if (uninitialized) {
4361       // Generate optimized virtual call (holder class 'Unsafe' is final)
4362       slow_call = generate_method_call(vmIntrinsics::_allocateUninitializedArray, false, false, true);
4363     } else {
4364       slow_call = generate_method_call_static(vmIntrinsics::_newArray, true);
4365     }
4366     Node* slow_result = set_results_for_java_call(slow_call);
4367     // this->control() comes from set_results_for_java_call
4368     result_reg->set_req(_slow_path, control());
4369     result_val->set_req(_slow_path, slow_result);
4370     result_io ->set_req(_slow_path, i_o());
4371     result_mem->set_req(_slow_path, reset_memory());
4372   }
4373 
4374   set_control(normal_ctl);
4375   if (!stopped()) {
4376     // Normal case:  The array type has been cached in the java.lang.Class.
4377     // The following call works fine even if the array type is polymorphic.
4378     // It could be a dynamic mix of int[], boolean[], Object[], etc.



4379     Node* obj = new_array(klass_node, count_val, 0);  // no arguments to push
4380     result_reg->init_req(_normal_path, control());
4381     result_val->init_req(_normal_path, obj);
4382     result_io ->init_req(_normal_path, i_o());
4383     result_mem->init_req(_normal_path, reset_memory());
4384 
4385     if (uninitialized) {
4386       // Mark the allocation so that zeroing is skipped
4387       AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(obj);
4388       alloc->maybe_set_complete(&_gvn);
4389     }
4390   }
4391 
4392   // Return the combined state.
4393   set_i_o(        _gvn.transform(result_io)  );
4394   set_all_memory( _gvn.transform(result_mem));
4395 
4396   C->set_has_split_ifs(true); // Has chance for split-if optimization
4397   set_result(result_reg, result_val);
4398   return true;

4447   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4448   { PreserveReexecuteState preexecs(this);
4449     jvms()->set_should_reexecute(true);
4450 
4451     array_type_mirror = null_check(array_type_mirror);
4452     original          = null_check(original);
4453 
4454     // Check if a null path was taken unconditionally.
4455     if (stopped())  return true;
4456 
4457     Node* orig_length = load_array_length(original);
4458 
4459     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4460     klass_node = null_check(klass_node);
4461 
4462     RegionNode* bailout = new RegionNode(1);
4463     record_for_igvn(bailout);
4464 
4465     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4466     // Bail out if that is so.
4467     Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
4468     if (not_objArray != nullptr) {
4469       // Improve the klass node's type from the new optimistic assumption:
4470       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4471       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
4472       Node* cast = new CastPPNode(control(), klass_node, akls);
4473       klass_node = _gvn.transform(cast);


4474     }
4475 
4476     // Bail out if either start or end is negative.
4477     generate_negative_guard(start, bailout, &start);
4478     generate_negative_guard(end,   bailout, &end);
4479 
4480     Node* length = end;
4481     if (_gvn.type(start) != TypeInt::ZERO) {
4482       length = _gvn.transform(new SubINode(end, start));
4483     }
4484 
4485     // Bail out if length is negative (i.e., if start > end).
4486     // Without this the new_array would throw
4487     // NegativeArraySizeException but IllegalArgumentException is what
4488     // should be thrown
4489     generate_negative_guard(length, bailout, &length);
4490 
4491     // Bail out if start is larger than the original length
4492     Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4493     generate_negative_guard(orig_tail, bailout, &orig_tail);
4494 
4495     if (bailout->req() > 1) {
4496       PreserveJVMState pjvms(this);
4497       set_control(_gvn.transform(bailout));
4498       uncommon_trap(Deoptimization::Reason_intrinsic,
4499                     Deoptimization::Action_maybe_recompile);
4500     }
4501 
4502     if (!stopped()) {
4503       // How many elements will we copy from the original?
4504       // The answer is MinI(orig_tail, length).
4505       Node* moved = _gvn.transform(new MinINode(orig_tail, length));
4506 
4507       // Generate a direct call to the right arraycopy function(s).
4508       // We know the copy is disjoint but we might not know if the
4509       // oop stores need checking.
4510       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).

4516       // to the copyOf to be validated, including that the copy to the
4517       // new array won't trigger an ArrayStoreException. That subtype
4518       // check can be optimized if we know something on the type of
4519       // the input array from type speculation.
4520       if (_gvn.type(klass_node)->singleton()) {
4521         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4522         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4523 
4524         int test = C->static_subtype_check(superk, subk);
4525         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4526           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4527           if (t_original->speculative_type() != nullptr) {
4528             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4529           }
4530         }
4531       }
4532 
4533       bool validated = false;
4534       // Reason_class_check rather than Reason_intrinsic because we
4535       // want to intrinsify even if this traps.
4536       if (!too_many_traps(Deoptimization::Reason_class_check)) {
4537         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4538 
4539         if (not_subtype_ctrl != top()) {
4540           PreserveJVMState pjvms(this);
4541           set_control(not_subtype_ctrl);
4542           uncommon_trap(Deoptimization::Reason_class_check,
4543                         Deoptimization::Action_make_not_entrant);
4544           assert(stopped(), "Should be stopped");
4545         }
4546         validated = true;
4547       }
4548 
4549       if (!stopped()) {
4550         newcopy = new_array(klass_node, length, 0);  // no arguments to push
4551 
4552         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
4553                                                 load_object_klass(original), klass_node);
4554         if (!is_copyOfRange) {
4555           ac->set_copyof(validated);
4556         } else {
4557           ac->set_copyofrange(validated);
4558         }
4559         Node* n = _gvn.transform(ac);
4560         if (n == ac) {
4561           ac->connect_outputs(this);
4562         } else {
4563           assert(validated, "shouldn't transform if all arguments not validated");
4564           set_all_memory(n);
4565         }
4566       }
4567     }
4568   } // original reexecute is set back here
4569 
4570   C->set_has_split_ifs(true); // Has chance for split-if optimization

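// For illustration, the guards above validate the copyOfRange arguments and
// size the copy: start, end and length = end - start must be non-negative,
// start must not exceed the original length, and the number of elements
// actually copied is min(orig_length - start, length). A standalone sketch
// with illustrative names; the real code deoptimizes on the bailout path and
// lets the bytecode throw the appropriate exception:

#include <algorithm>
#include <optional>

struct CopyOfRangePlan { int new_length; int moved; };

std::optional<CopyOfRangePlan> plan_copy_of_range_sketch(int orig_length, int start, int end) {
  const int length = end - start;
  if (start < 0 || end < 0 || length < 0 || start > orig_length) {
    return std::nullopt;                       // bail out of the intrinsic
  }
  const int orig_tail = orig_length - start;   // elements available to copy
  return CopyOfRangePlan{length, std::min(orig_tail, length)};
}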
4602 
4603 //-----------------------generate_method_call----------------------------
4604 // Use generate_method_call to make a slow-call to the real
4605 // method if the fast path fails.  An alternative would be to
4606 // use a stub like OptoRuntime::slow_arraycopy_Java.
4607 // This only works for expanding the current library call,
4608 // not another intrinsic.  (E.g., don't use this for making an
4609 // arraycopy call inside of the copyOf intrinsic.)
4610 CallJavaNode*
4611 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4612   // When compiling the intrinsic method itself, do not use this technique.
4613   guarantee(callee() != C->method(), "cannot make slow-call to self");
4614 
4615   ciMethod* method = callee();
4616   // ensure the JVMS we have will be correct for this call
4617   guarantee(method_id == method->intrinsic_id(), "must match");
4618 
4619   const TypeFunc* tf = TypeFunc::make(method);
4620   if (res_not_null) {
4621     assert(tf->return_type() == T_OBJECT, "");
4622     const TypeTuple* range = tf->range();
4623     const Type** fields = TypeTuple::fields(range->cnt());
4624     fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4625     const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4626     tf = TypeFunc::make(tf->domain(), new_range);
4627   }
4628   CallJavaNode* slow_call;
4629   if (is_static) {
4630     assert(!is_virtual, "");
4631     slow_call = new CallStaticJavaNode(C, tf,
4632                            SharedRuntime::get_resolve_static_call_stub(), method);
4633   } else if (is_virtual) {
4634     assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4635     int vtable_index = Method::invalid_vtable_index;
4636     if (UseInlineCaches) {
4637       // Suppress the vtable call
4638     } else {
4639       // hashCode and clone are not miranda methods,
4640       // so the vtable index is fixed.
4641       // No need to use the linkResolver to get it.
4642       vtable_index = method->vtable_index();
4643       assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4644              "bad index %d", vtable_index);
4645     }
4646     slow_call = new CallDynamicJavaNode(tf,

4663   set_edges_for_java_call(slow_call);
4664   return slow_call;
4665 }
4666 
4667 
4668 /**
4669  * Build special case code for calls to hashCode on an object. This call may
4670  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4671  * slightly different code.
4672  */
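// An illustrative sketch of the fast path built below (not the exact IR; the
// virtual-call guard and the UseObjectMonitorTable variant are omitted):
//   if (obj == null)                          -> null path (identityHashCode(null) == 0)
//   if ((mark & lock_mask) == monitor_value)  -> slow path (hash may live in the displaced header)
//   h = (int)(mark >>> hash_shift) & hash_mask;
//   if (h == no_hash)                         -> slow path (let the runtime install a hash)
//   return h;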
4673 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4674   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4675   assert(!(is_virtual && is_static), "either virtual, special, or static");
4676 
4677   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4678 
4679   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4680   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4681   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4682   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4683   Node* obj = nullptr;







4684   if (!is_static) {
4685     // Check for hashing null object
4686     obj = null_check_receiver();
4687     if (stopped())  return true;        // unconditionally null
4688     result_reg->init_req(_null_path, top());
4689     result_val->init_req(_null_path, top());
4690   } else {
4691     // Do a null check, and return zero if null.
4692     // System.identityHashCode(null) == 0
4693     obj = argument(0);
4694     Node* null_ctl = top();
4695     obj = null_check_oop(obj, &null_ctl);
4696     result_reg->init_req(_null_path, null_ctl);
4697     result_val->init_req(_null_path, _gvn.intcon(0));
4698   }
4699 
4700   // Unconditionally null?  Then return right away.
4701   if (stopped()) {
4702     set_control( result_reg->in(_null_path));
4703     if (!stopped())
4704       set_result(result_val->in(_null_path));
4705     return true;
4706   }
4707 
4708   // We only go to the fast case code if we pass a number of guards.  The
4709   // paths which do not pass are accumulated in the slow_region.
4710   RegionNode* slow_region = new RegionNode(1);
4711   record_for_igvn(slow_region);
4712 
4713   // If this is a virtual call, we generate a funny guard.  We pull out
4714   // the vtable entry corresponding to hashCode() from the target object.
4715   // If the target method which we are calling happens to be the native
4716   // Object hashCode() method, we pass the guard.  We do not need this
4717   // guard for non-virtual calls -- the caller is known to be the native
4718   // Object hashCode().
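  // Sketch of that guard (see generate_virtual_guard): load the vtable entry for
  // hashCode() from obj's klass and compare it with the native Object.hashCode();
  // if a subclass overrides hashCode(), the entries differ and we take the slow path.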
4719   if (is_virtual) {
4720     // After null check, get the object's klass.
4721     Node* obj_klass = load_object_klass(obj);
4722     generate_virtual_guard(obj_klass, slow_region);
4723   }
4724 
4725   // Get the header out of the object, use LoadMarkNode when available
4726   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4727   // The control of the load must be null. Otherwise, the load can move before
4728   // the null check after castPP removal.
4729   Node* no_ctrl = nullptr;
4730   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4731 
4732   if (!UseObjectMonitorTable) {
4733     // Test the header to see if it is safe to read w.r.t. locking.
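    // Roughly: if ((mark & lock_mask) == monitor_value) the lock is inflated and the
    // original header (with any hash) is displaced into the ObjectMonitor, so the hash
    // cannot be read from the mark word here and we must take the slow path.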


4734     Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);
4735     Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4736     Node *monitor_val   = _gvn.MakeConX(markWord::monitor_value);
4737     Node *chk_monitor   = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4738     Node *test_monitor  = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4739 
4740     generate_slow_guard(test_monitor, slow_region);
4741   }
4742 
4743   // Get the hash value and check to see that it has been properly assigned.
4744   // We depend on hash_mask being at most 32 bits and avoid the use of
4745   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4746   // vm: see markWord.hpp.
4747   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
4748   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
4749   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4750   // This hack lets the hash bits live anywhere in the mark word now, as long
4751   // as the shift drops the relevant bits into the low 32 bits.  Note that
4752   // the Java spec says that hashCode is an int, so there's no point in capturing
4753   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).

4781     // this->control() comes from set_results_for_java_call
4782     result_reg->init_req(_slow_path, control());
4783     result_val->init_req(_slow_path, slow_result);
4784     result_io  ->set_req(_slow_path, i_o());
4785     result_mem ->set_req(_slow_path, reset_memory());
4786   }
4787 
4788   // Return the combined state.
4789   set_i_o(        _gvn.transform(result_io)  );
4790   set_all_memory( _gvn.transform(result_mem));
4791 
4792   set_result(result_reg, result_val);
4793   return true;
4794 }
4795 
4796 //---------------------------inline_native_getClass----------------------------
4797 // public final native Class<?> java.lang.Object.getClass();
4798 //
4799 // Build special case code for calls to getClass on an object.
4800 bool LibraryCallKit::inline_native_getClass() {
4801   Node* obj = null_check_receiver();









4802   if (stopped())  return true;
4803   set_result(load_mirror_from_klass(load_object_klass(obj)));
4804   return true;
4805 }
4806 
4807 //-----------------inline_native_Reflection_getCallerClass---------------------
4808 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4809 //
4810 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4811 //
4812 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4813 // in that it must skip particular security frames and checks for
4814 // caller sensitive methods.
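// Illustrative reading: getCallerClass() answers "who called my caller?", skipping
// caller-sensitive frames. Once enough of that call chain is inlined here, the answer
// is known from the JVMS chain at compile time and the intrinsic folds to a constant.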
4815 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4816 #ifndef PRODUCT
4817   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4818     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4819   }
4820 #endif
4821 

5203 //  not cloneable or finalizer => slow path to out-of-line Object.clone
5204 //
5205 // The general case has two steps, allocation and copying.
5206 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5207 //
5208 // Copying also has two cases, oop arrays and everything else.
5209 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5210 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5211 //
5212 // These steps fold up nicely if and when the cloned object's klass
5213 // can be sharply typed as an object array, a type array, or an instance.
5214 //
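// Illustrative decision tree for the code generated below (a sketch only):
//   oop array and GC needs barriers -> new_array + arrayof_oop_arraycopy
//   any other array                 -> new_array + raw word copy (copy_to_clone)
//   non-array instance              -> new_instance + raw word copy
//   fails the guards                -> out-of-line call to Object.clone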
5215 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5216   PhiNode* result_val;
5217 
5218   // Set the reexecute bit for the interpreter to reexecute
5219   // the bytecode that invokes Object.clone if deoptimization happens.
5220   { PreserveReexecuteState preexecs(this);
5221     jvms()->set_should_reexecute(true);
5222 
5223     Node* obj = null_check_receiver();

5224     if (stopped())  return true;
5225 
5226     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();






5227 
5228     // If we are going to clone an instance, we need its exact type to
5229     // know the number and types of fields to convert the clone to
5230     // loads/stores. Maybe a speculative type can help us.
5231     if (!obj_type->klass_is_exact() &&
5232         obj_type->speculative_type() != nullptr &&
5233         obj_type->speculative_type()->is_instance_klass()) {

5234       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5235       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5236           !spec_ik->has_injected_fields()) {
5237         if (!obj_type->isa_instptr() ||
5238             obj_type->is_instptr()->instance_klass()->has_subklass()) {
5239           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5240         }
5241       }
5242     }
5243 
5244     // Conservatively insert a memory barrier on all memory slices.
5245     // Do not let writes into the original float below the clone.
5246     insert_mem_bar(Op_MemBarCPUOrder);
5247 
5248     // paths into result_reg:
5249     enum {
5250       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
5251       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
5252       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
5253       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
5254       PATH_LIMIT
5255     };
5256     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5257     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5258     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
5259     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5260     record_for_igvn(result_reg);
5261 
5262     Node* obj_klass = load_object_klass(obj);





5263     Node* array_obj = obj;
5264     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr, &array_obj);
5265     if (array_ctl != nullptr) {
5266       // It's an array.
5267       PreserveJVMState pjvms(this);
5268       set_control(array_ctl);
5269       Node* obj_length = load_array_length(array_obj);
5270       Node* array_size = nullptr; // Size of the array without object alignment padding.
5271       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5272 
5273       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5274       if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5275         // If it is an oop array, it requires very special treatment,
5276         // because gc barriers are required when accessing the array.
5277         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5278         if (is_obja != nullptr) {
5279           PreserveJVMState pjvms2(this);
5280           set_control(is_obja);
5281           // Generate a direct call to the right arraycopy function(s).
5282           // Clones are always tightly coupled.
5283           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, array_obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5284           ac->set_clone_oop_array();
5285           Node* n = _gvn.transform(ac);
5286           assert(n == ac, "cannot disappear");
5287           ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5288 
5289           result_reg->init_req(_objArray_path, control());
5290           result_val->init_req(_objArray_path, alloc_obj);
5291           result_i_o ->set_req(_objArray_path, i_o());
5292           result_mem ->set_req(_objArray_path, reset_memory());
5293         }
5294       }
5295       // Otherwise, there are no barriers to worry about.
5296       // (We can dispense with card marks if we know the allocation
5297       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
5298       //  causes the non-eden paths to take compensating steps to
5299       //  simulate a fresh allocation, so that no further
5300       //  card marks are required in compiled code to initialize
5301       //  the object.)
5302 
5303       if (!stopped()) {
5304         copy_to_clone(array_obj, alloc_obj, array_size, true);
5305 
5306         // Present the results of the copy.
5307         result_reg->init_req(_array_path, control());
5308         result_val->init_req(_array_path, alloc_obj);
5309         result_i_o ->set_req(_array_path, i_o());
5310         result_mem ->set_req(_array_path, reset_memory());




































5311       }
5312     }
5313 
5314     // We only go to the instance fast case code if we pass a number of guards.
5315     // The paths which do not pass are accumulated in the slow_region.
5316     RegionNode* slow_region = new RegionNode(1);
5317     record_for_igvn(slow_region);
5318     if (!stopped()) {
5319       // It's an instance (we did array above).  Make the slow-path tests.
5320       // If this is a virtual call, we generate a funny guard.  We grab
5321       // the vtable entry corresponding to clone() from the target object.
5322       // If the target method which we are calling happens to be the
5323       // Object clone() method, we pass the guard.  We do not need this
5324       // guard for non-virtual calls; the caller is known to be the native
5325       // Object clone().
5326       if (is_virtual) {
5327         generate_virtual_guard(obj_klass, slow_region);
5328       }
5329 
5330       // The object must be easily cloneable and must not have a finalizer.
5331       // Both of these conditions may be checked in a single test.
5332       // We could optimize the test further, but we don't care.
5333       generate_misc_flags_guard(obj_klass,
5334                                 // Test both conditions:
5335                                 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
5336                                 // Must be cloneable but not finalizer:
5337                                 KlassFlags::_misc_is_cloneable_fast,

5429         set_jvms(sfpt->jvms());
5430         _reexecute_sp = jvms()->sp();
5431 
5432         return saved_jvms;
5433       }
5434     }
5435   }
5436   return nullptr;
5437 }
5438 
5439 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5440 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5441 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5442   JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5443   uint size = alloc->req();
5444   SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5445   old_jvms->set_map(sfpt);
5446   for (uint i = 0; i < size; i++) {
5447     sfpt->init_req(i, alloc->in(i));
5448   }












5449   // re-push array length for deoptimization
5450   sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
5451   old_jvms->set_sp(old_jvms->sp()+1);
5452   old_jvms->set_monoff(old_jvms->monoff()+1);
5453   old_jvms->set_scloff(old_jvms->scloff()+1);
5454   old_jvms->set_endoff(old_jvms->endoff()+1);
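  // The expression stack grew by one slot, so every offset that follows the stack in
  // the JVMState layout (monitors, scalar-replaced objects, end) shifts by one as well;
  // hence the +1 adjustments above.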











5455   old_jvms->set_should_reexecute(true);
5456 
5457   sfpt->set_i_o(map()->i_o());
5458   sfpt->set_memory(map()->memory());
5459   sfpt->set_control(map()->control());
5460   return sfpt;
5461 }
5462 
5463 // In case of a deoptimization, we restart execution at the
5464 // allocation, allocating a new array. We would leave an uninitialized
5465 // array in the heap that GCs wouldn't expect. Move the allocation
5466 // after the traps so we don't allocate the array if we
5467 // deoptimize. This is possible because tightly_coupled_allocation()
5468 // guarantees there's no observer of the allocated array at this point
5469 // and the control flow is simple enough.
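// Sketch of the transformation (illustrative): before, the graph is
//   allocation -> guards -> arraycopy
// and afterwards it is
//   guards -> allocation -> arraycopy
// with the allocation's control/memory/i_o rewired to the state reached once all
// guards have passed.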
5470 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5471                                                     int saved_reexecute_sp, uint new_idx) {
5472   if (saved_jvms_before_guards != nullptr && !stopped()) {
5473     replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5474 
5475     assert(alloc != nullptr, "only with a tightly coupled allocation");
5476     // restore JVM state to the state at the arraycopy
5477     saved_jvms_before_guards->map()->set_control(map()->control());
5478     assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5479     assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5480     // If we've improved the types of some nodes (null check) while
5481     // emitting the guards, propagate them to the current state
5482     map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5483     set_jvms(saved_jvms_before_guards);
5484     _reexecute_sp = saved_reexecute_sp;
5485 
5486     // Remove the allocation from above the guards
5487     CallProjections callprojs;
5488     alloc->extract_projections(&callprojs, true);
5489     InitializeNode* init = alloc->initialization();
5490     Node* alloc_mem = alloc->in(TypeFunc::Memory);
5491     C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5492     init->replace_mem_projs_by(alloc_mem, C);
5493 
5494     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5495     // the allocation (i.e. is only valid if the allocation succeeds):
5496     // 1) replace CastIINode with AllocateArrayNode's length here
5497     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5498     //
5499     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
5500     // a new separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy)
5501     Node* init_control = init->proj_out(TypeFunc::Control);
5502     Node* alloc_length = alloc->Ideal_length();
5503 #ifdef ASSERT
5504     Node* prev_cast = nullptr;
5505 #endif
5506     for (uint i = 0; i < init_control->outcnt(); i++) {
5507       Node* init_out = init_control->raw_out(i);
5508       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5509 #ifdef ASSERT
5510         if (prev_cast == nullptr) {
5511           prev_cast = init_out;

5513           if (prev_cast->cmp(*init_out) == false) {
5514             prev_cast->dump();
5515             init_out->dump();
5516             assert(false, "not equal CastIINode");
5517           }
5518         }
5519 #endif
5520         C->gvn_replace_by(init_out, alloc_length);
5521       }
5522     }
5523     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5524 
5525     // move the allocation here (after the guards)
5526     _gvn.hash_delete(alloc);
5527     alloc->set_req(TypeFunc::Control, control());
5528     alloc->set_req(TypeFunc::I_O, i_o());
5529     Node *mem = reset_memory();
5530     set_all_memory(mem);
5531     alloc->set_req(TypeFunc::Memory, mem);
5532     set_control(init->proj_out_or_null(TypeFunc::Control));
5533     set_i_o(callprojs.fallthrough_ioproj);
5534 
5535     // Update memory as done in GraphKit::set_output_for_allocation()
5536     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5537     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5538     if (ary_type->isa_aryptr() && length_type != nullptr) {
5539       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5540     }
5541     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5542     int            elemidx  = C->get_alias_index(telemref);
5543     // Need to properly move every memory projection for the Initialize
5544 #ifdef ASSERT
5545     int mark_idx = C->get_alias_index(ary_type->add_offset(oopDesc::mark_offset_in_bytes()));
5546     int klass_idx = C->get_alias_index(ary_type->add_offset(oopDesc::klass_offset_in_bytes()));
5547 #endif
5548     auto move_proj = [&](ProjNode* proj) {
5549       int alias_idx = C->get_alias_index(proj->adr_type());
5550       assert(alias_idx == Compile::AliasIdxRaw ||
5551              alias_idx == elemidx ||
5552              alias_idx == mark_idx ||
5553              alias_idx == klass_idx, "should be raw memory or array element type");

5863         top_src  = src_type->isa_aryptr();
5864         has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5865         src_spec = true;
5866       }
5867       if (!has_dest) {
5868         dest = maybe_cast_profiled_obj(dest, dest_k, true);
5869         dest_type  = _gvn.type(dest);
5870         top_dest  = dest_type->isa_aryptr();
5871         has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5872         dest_spec = true;
5873       }
5874     }
5875   }
5876 
5877   if (has_src && has_dest && can_emit_guards) {
5878     BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5879     BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5880     if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5881     if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5882 
5883     if (src_elem == dest_elem && src_elem == T_OBJECT) {
5884       // If both arrays are object arrays then having the exact types
5885       // for both will remove the need for a subtype check at runtime
5886       // before the call and may make it possible to pick a faster copy
5887       // routine (without a subtype check on every element).
5888       // Do we have the exact type of src?
5889       bool could_have_src = src_spec;
5890       // Do we have the exact type of dest?
5891       bool could_have_dest = dest_spec;
5892       ciKlass* src_k = nullptr;
5893       ciKlass* dest_k = nullptr;
5894       if (!src_spec) {
5895         src_k = src_type->speculative_type_not_null();
5896         if (src_k != nullptr && src_k->is_array_klass()) {
5897           could_have_src = true;
5898         }
5899       }
5900       if (!dest_spec) {
5901         dest_k = dest_type->speculative_type_not_null();
5902         if (dest_k != nullptr && dest_k->is_array_klass()) {
5903           could_have_dest = true;
5904         }
5905       }
5906       if (could_have_src && could_have_dest) {
5907         // If we can have both exact types, emit the missing guards
5908         if (could_have_src && !src_spec) {
5909           src = maybe_cast_profiled_obj(src, src_k, true);


5910         }
5911         if (could_have_dest && !dest_spec) {
5912           dest = maybe_cast_profiled_obj(dest, dest_k, true);


5913         }
5914       }
5915     }
5916   }
5917 
5918   ciMethod* trap_method = method();
5919   int trap_bci = bci();
5920   if (saved_jvms_before_guards != nullptr) {
5921     trap_method = alloc->jvms()->method();
5922     trap_bci = alloc->jvms()->bci();
5923   }
5924 
5925   bool negative_length_guard_generated = false;
5926 
5927   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5928       can_emit_guards &&
5929       !src->is_top() && !dest->is_top()) {
5930     // validate arguments: enables transformation of the ArrayCopyNode
5931     validated = true;
5932 
5933     RegionNode* slow_region = new RegionNode(1);
5934     record_for_igvn(slow_region);
5935 
5936     // (1) src and dest are arrays.
5937     generate_non_array_guard(load_object_klass(src), slow_region, &src);
5938     generate_non_array_guard(load_object_klass(dest), slow_region, &dest);
5939 
5940     // (2) src and dest arrays must have elements of the same BasicType
5941     // done at macro expansion or at Ideal transformation time
5942 
5943     // (4) src_offset must not be negative.
5944     generate_negative_guard(src_offset, slow_region);
5945 
5946     // (5) dest_offset must not be negative.
5947     generate_negative_guard(dest_offset, slow_region);
5948 
5949     // (7) src_offset + length must not exceed length of src.
5950     generate_limit_guard(src_offset, length,
5951                          load_array_length(src),
5952                          slow_region);
5953 
5954     // (8) dest_offset + length must not exceed length of dest.
5955     generate_limit_guard(dest_offset, length,
5956                          load_array_length(dest),
5957                          slow_region);
5958 
5959     // (6) length must not be negative.
5960     // This is also checked in generate_arraycopy() during macro expansion, but
5961     // we also have to check it here for the case where the ArrayCopyNode will
5962     // be eliminated by Escape Analysis.
5963     if (EliminateAllocations) {
5964       generate_negative_guard(length, slow_region);
5965       negative_length_guard_generated = true;
5966     }
5967 
5968     // (9) each element of an oop array must be assignable
5969     Node* dest_klass = load_object_klass(dest);

5970     if (src != dest) {

5971       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
5972 
5973       if (not_subtype_ctrl != top()) {
5974         PreserveJVMState pjvms(this);
5975         set_control(not_subtype_ctrl);
5976         uncommon_trap(Deoptimization::Reason_intrinsic,
5977                       Deoptimization::Action_make_not_entrant);
5978         assert(stopped(), "Should be stopped");
5979       }
5980     }




























5981     {
5982       PreserveJVMState pjvms(this);
5983       set_control(_gvn.transform(slow_region));
5984       uncommon_trap(Deoptimization::Reason_intrinsic,
5985                     Deoptimization::Action_make_not_entrant);
5986       assert(stopped(), "Should be stopped");
5987     }
5988 
5989     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5990     const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
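    // Illustrative reading: the subtype check above (trivial when src == dest) proved
    // that src's klass is a subtype of dest's klass, so src can safely be re-typed to
    // dest's (non-exact) array type from here on.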








5991     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5992     arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
5993   }
5994 
5995   if (stopped()) {
5996     return true;
5997   }
5998 



5999   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6000                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
6001                                           // so the compiler has a chance to eliminate them: during macro expansion,
6002                                           // we have to set their control (CastPP nodes are eliminated).
6003                                           load_object_klass(src), load_object_klass(dest),
6004                                           load_array_length(src), load_array_length(dest));
6005 
6006   ac->set_arraycopy(validated);
6007 
6008   Node* n = _gvn.transform(ac);
6009   if (n == ac) {
6010     ac->connect_outputs(this);
6011   } else {
6012     assert(validated, "shouldn't transform if all arguments not validated");
6013     set_all_memory(n);
6014   }
6015   clear_upper_avx();
6016 
6017 
6018   return true;
6019 }
6020 
6021 
6022 // Helper function which determines if an arraycopy immediately follows
6023 // an allocation, with no intervening tests or other escapes for the object.

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/macroAssembler.hpp"
  26 #include "ci/ciArrayKlass.hpp"
  27 #include "ci/ciFlatArrayKlass.hpp"
  28 #include "ci/ciInstanceKlass.hpp"
  29 #include "ci/ciSymbols.hpp"
  30 #include "ci/ciUtilities.inline.hpp"
  31 #include "classfile/vmIntrinsics.hpp"
  32 #include "compiler/compileBroker.hpp"
  33 #include "compiler/compileLog.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/c2/barrierSetC2.hpp"
  36 #include "jfr/support/jfrIntrinsics.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "oops/accessDecorators.hpp"
  39 #include "oops/klass.inline.hpp"
  40 #include "oops/layoutKind.hpp"
  41 #include "oops/objArrayKlass.hpp"
  42 #include "opto/addnode.hpp"
  43 #include "opto/arraycopynode.hpp"
  44 #include "opto/c2compiler.hpp"
  45 #include "opto/castnode.hpp"
  46 #include "opto/cfgnode.hpp"
  47 #include "opto/convertnode.hpp"
  48 #include "opto/countbitsnode.hpp"
  49 #include "opto/graphKit.hpp"
  50 #include "opto/idealKit.hpp"
  51 #include "opto/inlinetypenode.hpp"
  52 #include "opto/library_call.hpp"
  53 #include "opto/mathexactnode.hpp"
  54 #include "opto/mulnode.hpp"
  55 #include "opto/narrowptrnode.hpp"
  56 #include "opto/opaquenode.hpp"
  57 #include "opto/opcodes.hpp"
  58 #include "opto/parse.hpp"
  59 #include "opto/rootnode.hpp"
  60 #include "opto/runtime.hpp"
  61 #include "opto/subnode.hpp"
  62 #include "opto/type.hpp"
  63 #include "opto/vectornode.hpp"
  64 #include "prims/jvmtiExport.hpp"
  65 #include "prims/jvmtiThreadState.hpp"
  66 #include "prims/unsafe.hpp"
  67 #include "runtime/globals.hpp"
  68 #include "runtime/jniHandles.inline.hpp"
  69 #include "runtime/mountUnmountDisabler.hpp"
  70 #include "runtime/objectMonitor.hpp"
  71 #include "runtime/sharedRuntime.hpp"
  72 #include "runtime/stubRoutines.hpp"
  73 #include "utilities/globalDefinitions.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/powerOfTwo.hpp"
  76 
  77 //---------------------------make_vm_intrinsic----------------------------
  78 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  79   vmIntrinsicID id = m->intrinsic_id();
  80   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
  81 
  82   if (!m->is_loaded()) {
  83     // Do not attempt to inline unloaded methods.
  84     return nullptr;
  85   }
  86 
  87   C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
  88   bool is_available = false;
  89 
  90   {
  91     // For calling is_intrinsic_supported and is_intrinsic_disabled_by_flag
  92     // the compiler must transition to '_thread_in_vm' state because both
  93     // methods access VM-internal data.

 404   case vmIntrinsics::_getReferenceOpaque:       return inline_unsafe_access(!is_store, T_OBJECT,   Opaque, false);
 405   case vmIntrinsics::_getBooleanOpaque:         return inline_unsafe_access(!is_store, T_BOOLEAN,  Opaque, false);
 406   case vmIntrinsics::_getByteOpaque:            return inline_unsafe_access(!is_store, T_BYTE,     Opaque, false);
 407   case vmIntrinsics::_getShortOpaque:           return inline_unsafe_access(!is_store, T_SHORT,    Opaque, false);
 408   case vmIntrinsics::_getCharOpaque:            return inline_unsafe_access(!is_store, T_CHAR,     Opaque, false);
 409   case vmIntrinsics::_getIntOpaque:             return inline_unsafe_access(!is_store, T_INT,      Opaque, false);
 410   case vmIntrinsics::_getLongOpaque:            return inline_unsafe_access(!is_store, T_LONG,     Opaque, false);
 411   case vmIntrinsics::_getFloatOpaque:           return inline_unsafe_access(!is_store, T_FLOAT,    Opaque, false);
 412   case vmIntrinsics::_getDoubleOpaque:          return inline_unsafe_access(!is_store, T_DOUBLE,   Opaque, false);
 413 
 414   case vmIntrinsics::_putReferenceOpaque:       return inline_unsafe_access( is_store, T_OBJECT,   Opaque, false);
 415   case vmIntrinsics::_putBooleanOpaque:         return inline_unsafe_access( is_store, T_BOOLEAN,  Opaque, false);
 416   case vmIntrinsics::_putByteOpaque:            return inline_unsafe_access( is_store, T_BYTE,     Opaque, false);
 417   case vmIntrinsics::_putShortOpaque:           return inline_unsafe_access( is_store, T_SHORT,    Opaque, false);
 418   case vmIntrinsics::_putCharOpaque:            return inline_unsafe_access( is_store, T_CHAR,     Opaque, false);
 419   case vmIntrinsics::_putIntOpaque:             return inline_unsafe_access( is_store, T_INT,      Opaque, false);
 420   case vmIntrinsics::_putLongOpaque:            return inline_unsafe_access( is_store, T_LONG,     Opaque, false);
 421   case vmIntrinsics::_putFloatOpaque:           return inline_unsafe_access( is_store, T_FLOAT,    Opaque, false);
 422   case vmIntrinsics::_putDoubleOpaque:          return inline_unsafe_access( is_store, T_DOUBLE,   Opaque, false);
 423 
 424   case vmIntrinsics::_getFlatValue:             return inline_unsafe_flat_access(!is_store, Relaxed);
 425   case vmIntrinsics::_putFlatValue:             return inline_unsafe_flat_access( is_store, Relaxed);
 426 
 427   case vmIntrinsics::_compareAndSetReference:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap,      Volatile);
 428   case vmIntrinsics::_compareAndSetByte:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap,      Volatile);
 429   case vmIntrinsics::_compareAndSetShort:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap,      Volatile);
 430   case vmIntrinsics::_compareAndSetInt:         return inline_unsafe_load_store(T_INT,    LS_cmp_swap,      Volatile);
 431   case vmIntrinsics::_compareAndSetLong:        return inline_unsafe_load_store(T_LONG,   LS_cmp_swap,      Volatile);
 432 
 433   case vmIntrinsics::_weakCompareAndSetReferencePlain:     return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Relaxed);
 434   case vmIntrinsics::_weakCompareAndSetReferenceAcquire:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Acquire);
 435   case vmIntrinsics::_weakCompareAndSetReferenceRelease:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Release);
 436   case vmIntrinsics::_weakCompareAndSetReference:          return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Volatile);
 437   case vmIntrinsics::_weakCompareAndSetBytePlain:          return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Relaxed);
 438   case vmIntrinsics::_weakCompareAndSetByteAcquire:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Acquire);
 439   case vmIntrinsics::_weakCompareAndSetByteRelease:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Release);
 440   case vmIntrinsics::_weakCompareAndSetByte:               return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Volatile);
 441   case vmIntrinsics::_weakCompareAndSetShortPlain:         return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Relaxed);
 442   case vmIntrinsics::_weakCompareAndSetShortAcquire:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Acquire);
 443   case vmIntrinsics::_weakCompareAndSetShortRelease:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Release);
 444   case vmIntrinsics::_weakCompareAndSetShort:              return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Volatile);
 445   case vmIntrinsics::_weakCompareAndSetIntPlain:           return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Relaxed);
 446   case vmIntrinsics::_weakCompareAndSetIntAcquire:         return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Acquire);

 466   case vmIntrinsics::_compareAndExchangeLong:              return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Volatile);
 467   case vmIntrinsics::_compareAndExchangeLongAcquire:       return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Acquire);
 468   case vmIntrinsics::_compareAndExchangeLongRelease:       return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Release);
 469 
 470   case vmIntrinsics::_getAndAddByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_add,       Volatile);
 471   case vmIntrinsics::_getAndAddShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_add,       Volatile);
 472   case vmIntrinsics::_getAndAddInt:                     return inline_unsafe_load_store(T_INT,    LS_get_add,       Volatile);
 473   case vmIntrinsics::_getAndAddLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_add,       Volatile);
 474 
 475   case vmIntrinsics::_getAndSetByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_set,       Volatile);
 476   case vmIntrinsics::_getAndSetShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_set,       Volatile);
 477   case vmIntrinsics::_getAndSetInt:                     return inline_unsafe_load_store(T_INT,    LS_get_set,       Volatile);
 478   case vmIntrinsics::_getAndSetLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_set,       Volatile);
 479   case vmIntrinsics::_getAndSetReference:               return inline_unsafe_load_store(T_OBJECT, LS_get_set,       Volatile);
 480 
 481   case vmIntrinsics::_loadFence:
 482   case vmIntrinsics::_storeFence:
 483   case vmIntrinsics::_storeStoreFence:
 484   case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
 485 
 486   case vmIntrinsics::_arrayInstanceBaseOffset:  return inline_arrayInstanceBaseOffset();
 487   case vmIntrinsics::_arrayInstanceIndexScale:  return inline_arrayInstanceIndexScale();
 488   case vmIntrinsics::_arrayLayout:              return inline_arrayLayout();
 489   case vmIntrinsics::_getFieldMap:              return inline_getFieldMap();
 490 
 491   case vmIntrinsics::_onSpinWait:               return inline_onspinwait();
 492 
 493   case vmIntrinsics::_currentCarrierThread:     return inline_native_currentCarrierThread();
 494   case vmIntrinsics::_currentThread:            return inline_native_currentThread();
 495   case vmIntrinsics::_setCurrentThread:         return inline_native_setCurrentThread();
 496 
 497   case vmIntrinsics::_scopedValueCache:          return inline_native_scopedValueCache();
 498   case vmIntrinsics::_setScopedValueCache:       return inline_native_setScopedValueCache();
 499 
 500   case vmIntrinsics::_Continuation_pin:          return inline_native_Continuation_pinning(false);
 501   case vmIntrinsics::_Continuation_unpin:        return inline_native_Continuation_pinning(true);
 502 
 503   case vmIntrinsics::_vthreadEndFirstTransition:    return inline_native_vthread_end_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_end_first_transition_Java()),
 504                                                                                                 "endFirstTransition", true);
 505   case vmIntrinsics::_vthreadStartFinalTransition:  return inline_native_vthread_start_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_start_final_transition_Java()),
 506                                                                                                   "startFinalTransition", true);
 507   case vmIntrinsics::_vthreadStartTransition:       return inline_native_vthread_start_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_start_transition_Java()),
 508                                                                                                   "startTransition", false);
 509   case vmIntrinsics::_vthreadEndTransition:         return inline_native_vthread_end_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_end_transition_Java()),
 510                                                                                                 "endTransition", false);

 519 #endif
 520   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 521   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 522   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 523   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 524   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 525   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 526   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 527   case vmIntrinsics::_setMemory:                return inline_unsafe_setMemory();
 528   case vmIntrinsics::_getLength:                return inline_native_getLength();
 529   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 530   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 531   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 532   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 533   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 534   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 535   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 536 
 537   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 538   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 539   case vmIntrinsics::_newNullRestrictedNonAtomicArray: return inline_newArray(/* null_free */ true, /* atomic */ false);
 540   case vmIntrinsics::_newNullRestrictedAtomicArray: return inline_newArray(/* null_free */ true, /* atomic */ true);
 541   case vmIntrinsics::_newNullableAtomicArray:     return inline_newArray(/* null_free */ false, /* atomic */ true);
 542   case vmIntrinsics::_isFlatArray:              return inline_getArrayProperties(IsFlat);
 543   case vmIntrinsics::_isNullRestrictedArray:    return inline_getArrayProperties(IsNullRestricted);
 544   case vmIntrinsics::_isAtomicArray:            return inline_getArrayProperties(IsAtomic);
 545 
 546   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 547 
 548   case vmIntrinsics::_isInstance:
 549   case vmIntrinsics::_isHidden:
 550   case vmIntrinsics::_getSuperclass:            return inline_native_Class_query(intrinsic_id());
 551 
 552   case vmIntrinsics::_floatToRawIntBits:
 553   case vmIntrinsics::_floatToIntBits:
 554   case vmIntrinsics::_intBitsToFloat:
 555   case vmIntrinsics::_doubleToRawLongBits:
 556   case vmIntrinsics::_doubleToLongBits:
 557   case vmIntrinsics::_longBitsToDouble:
 558   case vmIntrinsics::_floatToFloat16:
 559   case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());
 560   case vmIntrinsics::_sqrt_float16:             return inline_fp16_operations(intrinsic_id(), 1);
 561   case vmIntrinsics::_fma_float16:              return inline_fp16_operations(intrinsic_id(), 3);
 562   case vmIntrinsics::_floatIsFinite:
 563   case vmIntrinsics::_floatIsInfinite:
 564   case vmIntrinsics::_doubleIsFinite:

2289     case vmIntrinsics::_remainderUnsigned_l: {
2290       zero_check_long(argument(2));
2291       // Compile-time detection of the divide-by-zero exception
2292       if (stopped()) {
2293         return true; // keep the graph constructed so far
2294       }
2295       n = new UModLNode(control(), argument(0), argument(2));
2296       break;
2297     }
2298     default:  fatal_unexpected_iid(id);  break;
2299   }
2300   set_result(_gvn.transform(n));
2301   return true;
2302 }
2303 
2304 //----------------------------inline_unsafe_access----------------------------
2305 
2306 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2307   // Attempt to infer a sharper value type from the offset and base type.
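  // A purely illustrative example: an Unsafe getReference from a String[] element or
  // from an instance field declared as String can be typed as String rather than
  // plain Object, which helps downstream optimizations.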
2308   ciKlass* sharpened_klass = nullptr;
2309   bool null_free = false;
2310 
2311   // See if it is an instance field, with an object type.
2312   if (alias_type->field() != nullptr) {
2313     if (alias_type->field()->type()->is_klass()) {
2314       sharpened_klass = alias_type->field()->type()->as_klass();
2315       null_free = alias_type->field()->is_null_free();
2316     }
2317   }
2318 
2319   const TypeOopPtr* result = nullptr;
2320   // See if it is a narrow oop array.
2321   if (adr_type->isa_aryptr()) {
2322     if (adr_type->offset() >= refArrayOopDesc::base_offset_in_bytes()) {
2323       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2324       null_free = adr_type->is_aryptr()->is_null_free();
2325       if (elem_type != nullptr && elem_type->is_loaded()) {
2326         // Sharpen the value type.
2327         result = elem_type;
2328       }
2329     }
2330   }
2331 
2332   // The sharpened class might be unloaded if there is no class loader
2333   // constraint in place.
2334   if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2335     // Sharpen the value type.
2336     result = TypeOopPtr::make_from_klass(sharpened_klass);
2337     if (null_free) {
2338       result = result->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2339     }
2340   }
2341   if (result != nullptr) {
2342 #ifndef PRODUCT
2343     if (C->print_intrinsics() || C->print_inlining()) {
2344       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2345       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2346     }
2347 #endif
2348   }
2349   return result;
2350 }
2351 
2352 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2353   switch (kind) {
2354       case Relaxed:
2355         return MO_UNORDERED;
2356       case Opaque:
2357         return MO_RELAXED;
2358       case Acquire:
2359         return MO_ACQUIRE;

2407 #endif // ASSERT
2408  }
2409 #endif //PRODUCT
2410 
2411   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2412 
2413   Node* receiver = argument(0);  // type: oop
2414 
2415   // Build address expression.
2416   Node* heap_base_oop = top();
2417 
2418   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2419   Node* base = argument(1);  // type: oop
2420   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2421   Node* offset = argument(2);  // type: long
2422   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2423   // to be plain byte offsets, which are also the same as those accepted
2424   // by oopDesc::field_addr.
2425   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2426          "fieldOffset must be byte-scaled");
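  // In other words (sketch): the effective address is simply base plus a byte offset;
  // make_unsafe_address() below builds exactly that address and also copes with a
  // possibly-null or off-heap base.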
2427 
2428   if (base->is_InlineType()) {
2429     assert(!is_store, "InlineTypeNodes are non-larval value objects");
2430     InlineTypeNode* vt = base->as_InlineType();
2431     if (offset->is_Con()) {
2432       long off = find_long_con(offset, 0);
2433       ciInlineKlass* vk = vt->type()->inline_klass();
2434       if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2435         return false;
2436       }
2437 
2438       ciField* field = vk->get_non_flat_field_by_offset(off);
2439       if (field != nullptr) {
2440         BasicType bt = type2field[field->type()->basic_type()];
2441         if (bt == T_ARRAY || bt == T_NARROWOOP) {
2442           bt = T_OBJECT;
2443         }
2444         if (bt == type && !field->is_flat()) {
2445           Node* value = vt->field_value_by_offset(off, false);
2446           const Type* value_type = _gvn.type(value);
2447           if (value->is_InlineType()) {
2448             value = value->as_InlineType()->adjust_scalarization_depth(this);
2449           } else if (value_type->is_inlinetypeptr()) {
2450             value = InlineTypeNode::make_from_oop(this, value, value_type->inline_klass());
2451           }
2452           set_result(value);
2453           return true;
2454         }
2455       }
2456     }
2457     {
2458       // Re-execute the unsafe access if allocation triggers deoptimization.
2459       PreserveReexecuteState preexecs(this);
2460       jvms()->set_should_reexecute(true);
2461       vt = vt->buffer(this);
2462     }
2463     base = vt->get_oop();
2464   }
2465 
2466   // 32-bit machines ignore the high half!
2467   offset = ConvL2X(offset);
2468 
2469   // Save state and restore on bailout
2470   SavedState old_state(this);
2471 
2472   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2473   assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2474 
2475   if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2476     if (type != T_OBJECT) {
2477       decorators |= IN_NATIVE; // off-heap primitive access
2478     } else {
2479       return false; // off-heap oop accesses are not supported
2480     }
2481   } else {
2482     heap_base_oop = base; // on-heap or mixed access
2483   }
2484 
2485   // Can base be null? Otherwise, always on-heap access.

2489     decorators |= IN_HEAP;
2490   }
2491 
2492   Node* val = is_store ? argument(4) : nullptr;
2493 
2494   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2495   if (adr_type == TypePtr::NULL_PTR) {
2496     return false; // off-heap access with zero address
2497   }
2498 
2499   // Try to categorize the address.
2500   Compile::AliasType* alias_type = C->alias_type(adr_type);
2501   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2502 
2503   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2504       alias_type->adr_type() == TypeAryPtr::RANGE) {
2505     return false; // not supported
2506   }
2507 
2508   bool mismatched = false;
2509   BasicType bt = T_ILLEGAL;
2510   ciField* field = nullptr;
2511   if (adr_type->isa_instptr()) {
2512     const TypeInstPtr* instptr = adr_type->is_instptr();
2513     ciInstanceKlass* k = instptr->instance_klass();
2514     int off = instptr->offset();
2515     if (instptr->const_oop() != nullptr &&
2516         k == ciEnv::current()->Class_klass() &&
2517         instptr->offset() >= (k->size_helper() * wordSize)) {
2518       k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2519       field = k->get_field_by_offset(off, true);
2520     } else {
2521       field = k->get_non_flat_field_by_offset(off);
2522     }
2523     if (field != nullptr) {
2524       bt = type2field[field->type()->basic_type()];
2525     }
2526     if (bt != alias_type->basic_type()) {
2527       // Type mismatch. Is it an access to a nested flat field?
2528       field = k->get_field_by_offset(off, false);
2529       if (field != nullptr) {
2530         bt = type2field[field->type()->basic_type()];
2531       }
2532     }
2533     assert(bt == alias_type->basic_type(), "should match");
2534   } else {
2535     bt = alias_type->basic_type();
2536   }
2537 
2538   if (bt != T_ILLEGAL) {
2539     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2540     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2541       // Alias type doesn't differentiate between byte[] and boolean[].
2542       // Use address type to get the element type.
2543       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2544     }
2545     if (is_reference_type(bt, true)) {
2546       // accessing an array field with getReference is not a mismatch
2547       bt = T_OBJECT;
2548     }
2549     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2550       // Don't intrinsify mismatched object accesses
2551       return false;
2552     }
2553     mismatched = (bt != type);
2554   } else if (alias_type->adr_type()->isa_oopptr()) {
2555     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2556   }
2557 
2558   old_state.discard();
2559   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2560 
2561   if (mismatched) {
2562     decorators |= C2_MISMATCHED;
2563   }
2564 
2565   // First guess at the value type.
2566   const Type *value_type = Type::get_const_basic_type(type);
2567 
2568   // Figure out the memory ordering.
2569   decorators |= mo_decorator_for_access_kind(kind);
2570 
2571   if (!is_store) {
2572     if (type == T_OBJECT) {
2573       const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2574       if (tjp != nullptr) {
2575         value_type = tjp;
2576       }
2577     }
2578   }
2579 
2580   receiver = null_check(receiver);
2581   if (stopped()) {
2582     return true;
2583   }
2584   // Heap pointers get a null-check from the interpreter,
2585   // as a courtesy.  However, this is not guaranteed by Unsafe,
2586   // and it is not possible to fully distinguish unintended nulls
2587   // from intended ones in this API.
2588 
2589   if (!is_store) {
2590     Node* p = nullptr;
2591     // Try to constant fold a load from a constant field
2592 
2593     if (heap_base_oop != top() && field != nullptr && field->is_constant() && !field->is_flat() && !mismatched) {
2594       // final or stable field
2595       p = make_constant_from_field(field, heap_base_oop);
2596     }
2597 
2598     if (p == nullptr) { // Could not constant fold the load
2599       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2600       const TypeOopPtr* ptr = value_type->make_oopptr();
2601       if (ptr != nullptr && ptr->is_inlinetypeptr()) {
2602         // Load a non-flattened inline type from memory
2603         p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass());
2604       }
2605       // Normalize the value returned by getBoolean in the following cases
2606       if (type == T_BOOLEAN &&
2607           (mismatched ||
2608            heap_base_oop == top() ||                  // - heap_base_oop is null or
2609            (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2610                                                       //   and the unsafe access is made to large offset
2611                                                       //   (i.e., larger than the maximum offset necessary for any
2612                                                       //   field access)
2613             ) {
2614           IdealKit ideal = IdealKit(this);
2615 #define __ ideal.
2616           IdealVariable normalized_result(ideal);
2617           __ declarations_done();
2618           __ set(normalized_result, p);
2619           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2620           __ set(normalized_result, ideal.ConI(1));
2621           ideal.end_if();
2622           final_sync(ideal);
2623           p = __ value(normalized_result);
2624 #undef __
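          // Net effect (sketch): p = (p != 0) ? 1 : 0, i.e. any non-zero byte read as
          // a boolean is normalized to exactly 1.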

2628       p = gvn().transform(new CastP2XNode(nullptr, p));
2629       p = ConvX2UL(p);
2630     }
2631     // The load node has the control of the preceding MemBarCPUOrder.  All
2632     // following nodes will have the control of the MemBarCPUOrder inserted at
2633     // the end of this method.  So, pushing the load onto the stack at a later
2634     // point is fine.
2635     set_result(p);
2636   } else {
2637     if (bt == T_ADDRESS) {
2638       // Repackage the long as a pointer.
2639       val = ConvL2X(val);
2640       val = gvn().transform(new CastX2PNode(val));
2641     }
2642     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2643   }
2644 
2645   return true;
2646 }
2647 
2648 bool LibraryCallKit::inline_unsafe_flat_access(bool is_store, AccessKind kind) {
2649 #ifdef ASSERT
2650   {
2651     ResourceMark rm;
2652     // Check the signatures.
2653     ciSignature* sig = callee()->signature();
2654     assert(sig->type_at(0)->basic_type() == T_OBJECT, "base should be object, but is %s", type2name(sig->type_at(0)->basic_type()));
2655     assert(sig->type_at(1)->basic_type() == T_LONG, "offset should be long, but is %s", type2name(sig->type_at(1)->basic_type()));
2656     assert(sig->type_at(2)->basic_type() == T_INT, "layout kind should be int, but is %s", type2name(sig->type_at(2)->basic_type()));
2657     assert(sig->type_at(3)->basic_type() == T_OBJECT, "value klass should be object, but is %s", type2name(sig->type_at(3)->basic_type()));
2658     if (is_store) {
2659       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value, but returns %s", type2name(sig->return_type()->basic_type()));
2660       assert(sig->count() == 5, "flat putter should have 5 arguments, but has %d", sig->count());
2661       assert(sig->type_at(4)->basic_type() == T_OBJECT, "put value should be object, but is %s", type2name(sig->type_at(4)->basic_type()));
2662     } else {
2663       assert(sig->return_type()->basic_type() == T_OBJECT, "getter must return an object, but returns %s", type2name(sig->return_type()->basic_type()));
2664       assert(sig->count() == 4, "flat getter should have 4 arguments, but has %d", sig->count());
2665     }
2666   }
2667 #endif // ASSERT
2668 
2669   assert(kind == Relaxed, "Only plain accesses for now");
2670   if (callee()->is_static()) {
2671     // caller must have the capability!
2672     return false;
2673   }
2674   C->set_has_unsafe_access(true);
2675 
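       // Argument slots: 0 = Unsafe receiver, 1 = base object, 2/3 = long offset,
       // 4 = layoutKind, 5 = valueType mirror, 6 = value to store (putters only).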
2676   const TypeInstPtr* value_klass_node = _gvn.type(argument(5))->isa_instptr();
2677   if (value_klass_node == nullptr || value_klass_node->const_oop() == nullptr) {
2678     // parameter valueType is not a constant
2679     return false;
2680   }
2681   ciType* mirror_type = value_klass_node->const_oop()->as_instance()->java_mirror_type();
2682   if (!mirror_type->is_inlinetype()) {
2683     // Dead code
2684     return false;
2685   }
2686   ciInlineKlass* value_klass = mirror_type->as_inline_klass();
2687 
2688   const TypeInt* layout_type = _gvn.type(argument(4))->isa_int();
2689   if (layout_type == nullptr || !layout_type->is_con()) {
2690     // parameter layoutKind is not a constant
2691     return false;
2692   }
2693   assert(layout_type->get_con() >= static_cast<int>(LayoutKind::REFERENCE) &&
2694          layout_type->get_con() < static_cast<int>(LayoutKind::UNKNOWN),
2695          "invalid layoutKind %d", layout_type->get_con());
2696   LayoutKind layout = static_cast<LayoutKind>(layout_type->get_con());
2697   assert(layout == LayoutKind::REFERENCE || layout == LayoutKind::NULL_FREE_NON_ATOMIC_FLAT ||
2698          layout == LayoutKind::NULL_FREE_ATOMIC_FLAT || layout == LayoutKind::NULLABLE_ATOMIC_FLAT,
2699          "unexpected layoutKind %d", layout_type->get_con());
2700 
2701   null_check(argument(0));
2702   if (stopped()) {
2703     return true;
2704   }
2705 
2706   Node* base = must_be_not_null(argument(1), true);
2707   Node* offset = argument(2);
2708   const Type* base_type = _gvn.type(base);
2709 
2710   Node* ptr;
2711   bool immutable_memory = false;
2712   DecoratorSet decorators = C2_UNSAFE_ACCESS | IN_HEAP | MO_UNORDERED;
2713   if (base_type->isa_instptr()) {
2714     const TypeLong* offset_type = _gvn.type(offset)->isa_long();
2715     if (offset_type == nullptr || !offset_type->is_con()) {
2716       // Offset into a non-array should be a constant
2717       decorators |= C2_MISMATCHED;
2718     } else {
2719       int offset_con = checked_cast<int>(offset_type->get_con());
2720       ciInstanceKlass* base_klass = base_type->is_instptr()->instance_klass();
2721       ciField* field = base_klass->get_non_flat_field_by_offset(offset_con);
2722       if (field == nullptr) {
2723         assert(!base_klass->is_final(), "non-existent field at offset %d of class %s", offset_con, base_klass->name()->as_utf8());
2724         decorators |= C2_MISMATCHED;
2725       } else {
2726         assert(field->type() == value_klass, "field at offset %d of %s is of type %s, but valueType is %s",
2727                offset_con, base_klass->name()->as_utf8(), field->type()->name(), value_klass->name()->as_utf8());
2728         immutable_memory = field->is_strict() && field->is_final();
2729 
2730         if (base->is_InlineType()) {
2731           assert(!is_store, "Cannot store into a non-larval value object");
2732           set_result(base->as_InlineType()->field_value_by_offset(offset_con, false));
2733           return true;
2734         }
2735       }
2736     }
2737 
2738     if (base->is_InlineType()) {
2739       assert(!is_store, "Cannot store into a non-larval value object");
2740       base = base->as_InlineType()->buffer(this, true);
2741     }
2742     ptr = basic_plus_adr(base, ConvL2X(offset));
2743   } else if (base_type->isa_aryptr()) {
2744     decorators |= IS_ARRAY;
2745     if (layout == LayoutKind::REFERENCE) {
2746       if (!base_type->is_aryptr()->is_not_flat()) {
2747         const TypeAryPtr* array_type = base_type->is_aryptr()->cast_to_not_flat();
2748         Node* new_base = _gvn.transform(new CastPPNode(control(), base, array_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing));
2749         replace_in_map(base, new_base);
2750         base = new_base;
2751       }
2752       ptr = basic_plus_adr(base, ConvL2X(offset));
2753     } else {
2754       if (UseArrayFlattening) {
2755         // Flat array must have an exact type
2756         bool is_null_free = !LayoutKindHelper::is_nullable_flat(layout);
2757         bool is_atomic = LayoutKindHelper::is_atomic_flat(layout);
2758         Node* new_base = cast_to_flat_array_exact(base, value_klass, is_null_free, is_atomic);
2759         replace_in_map(base, new_base);
2760         base = new_base;
2761         ptr = basic_plus_adr(base, ConvL2X(offset));
2762         const TypeAryPtr* ptr_type = _gvn.type(ptr)->is_aryptr();
2763         if (ptr_type->field_offset().get() != 0) {
2764           ptr = _gvn.transform(new CastPPNode(control(), ptr, ptr_type->with_field_offset(0), ConstraintCastNode::DependencyType::NonFloatingNarrowing));
2765         }
2766       } else {
2767         uncommon_trap(Deoptimization::Reason_intrinsic,
2768                       Deoptimization::Action_none);
2769         return true;
2770       }
2771     }
2772   } else {
2773     decorators |= C2_MISMATCHED;
2774     ptr = basic_plus_adr(base, ConvL2X(offset));
2775   }
2776 
2777   if (is_store) {
2778     Node* value = argument(6);
2779     const Type* value_type = _gvn.type(value);
2780     if (!value_type->is_inlinetypeptr()) {
2781       value_type = Type::get_const_type(value_klass)->filter_speculative(value_type);
2782       Node* new_value = _gvn.transform(new CastPPNode(control(), value, value_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing));
2783       new_value = InlineTypeNode::make_from_oop(this, new_value, value_klass);
2784       replace_in_map(value, new_value);
2785       value = new_value;
2786     }
2787 
2788     assert(value_type->inline_klass() == value_klass, "value is of type %s while valueType is %s", value_type->inline_klass()->name()->as_utf8(), value_klass->name()->as_utf8());
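         // A REFERENCE layout stores the (buffered) oop itself; the flat layouts write
         // the value's fields directly into the containing object or array element.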
2789     if (layout == LayoutKind::REFERENCE) {
2790       const TypePtr* ptr_type = (decorators & C2_MISMATCHED) != 0 ? TypeRawPtr::BOTTOM : _gvn.type(ptr)->is_ptr();
2791       access_store_at(base, ptr, ptr_type, value, value_type, T_OBJECT, decorators);
2792     } else {
2793       bool atomic = LayoutKindHelper::is_atomic_flat(layout);
2794       bool null_free = !LayoutKindHelper::is_nullable_flat(layout);
2795       value->as_InlineType()->store_flat(this, base, ptr, atomic, immutable_memory, null_free, decorators);
2796     }
2797 
2798     return true;
2799   } else {
2800     decorators |= (C2_CONTROL_DEPENDENT_LOAD | C2_UNKNOWN_CONTROL_LOAD);
2801     InlineTypeNode* result;
2802     if (layout == LayoutKind::REFERENCE) {
2803       const TypePtr* ptr_type = (decorators & C2_MISMATCHED) != 0 ? TypeRawPtr::BOTTOM : _gvn.type(ptr)->is_ptr();
2804       Node* oop = access_load_at(base, ptr, ptr_type, Type::get_const_type(value_klass), T_OBJECT, decorators);
2805       result = InlineTypeNode::make_from_oop(this, oop, value_klass);
2806     } else {
2807       bool atomic = LayoutKindHelper::is_atomic_flat(layout);
2808       bool null_free = !LayoutKindHelper::is_nullable_flat(layout);
2809       result = InlineTypeNode::make_from_flat(this, value_klass, base, ptr, atomic, immutable_memory, null_free, decorators);
2810     }
2811 
2812     set_result(result);
2813     return true;
2814   }
2815 }
2816 
2817 //----------------------------inline_unsafe_load_store----------------------------
2818 // This method serves a couple of different customers (depending on LoadStoreKind):
2819 //
2820 // LS_cmp_swap:
2821 //
2822 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2823 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2824 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2825 //
2826 // LS_cmp_swap_weak:
2827 //
2828 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2829 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2830 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2831 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2832 //
2833 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);
2834 //   boolean weakCompareAndSetIntPlain(     Object o, long offset, int    expected, int    x);
2835 //   boolean weakCompareAndSetIntAcquire(   Object o, long offset, int    expected, int    x);
2836 //   boolean weakCompareAndSetIntRelease(   Object o, long offset, int    expected, int    x);

2999     }
3000     case LS_cmp_swap:
3001     case LS_cmp_swap_weak:
3002     case LS_get_add:
3003       break;
3004     default:
3005       ShouldNotReachHere();
3006   }
3007 
3008   // Null check receiver.
3009   receiver = null_check(receiver);
3010   if (stopped()) {
3011     return true;
3012   }
3013 
3014   int alias_idx = C->get_alias_index(adr_type);
3015 
3016   if (is_reference_type(type)) {
3017     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
3018 
3019     if (oldval != nullptr && oldval->is_InlineType()) {
3020       // Re-execute the unsafe access if allocation triggers deoptimization.
3021       PreserveReexecuteState preexecs(this);
3022       jvms()->set_should_reexecute(true);
3023       oldval = oldval->as_InlineType()->buffer(this)->get_oop();
3024     }
3025     if (newval != nullptr && newval->is_InlineType()) {
3026       // Re-execute the unsafe access if allocation triggers deoptimization.
3027       PreserveReexecuteState preexecs(this);
3028       jvms()->set_should_reexecute(true);
3029       newval = newval->as_InlineType()->buffer(this)->get_oop();
3030     }
3031 
3032     // Transformation of a value which could be null pointer (CastPP #null)
3033     // could be delayed during Parse (for example, in adjust_map_after_if()).
3034     // Execute transformation here to avoid barrier generation in such case.
3035     if (_gvn.type(newval) == TypePtr::NULL_PTR)
3036       newval = _gvn.makecon(TypePtr::NULL_PTR);
3037 
3038     if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
3039       // Refine the value to a null constant, when it is known to be null
3040       oldval = _gvn.makecon(TypePtr::NULL_PTR);
3041     }
3042   }
3043 
3044   Node* result = nullptr;
3045   switch (kind) {
3046     case LS_cmp_exchange: {
3047       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
3048                                             oldval, newval, value_type, type, decorators);
3049       break;
3050     }
3051     case LS_cmp_swap_weak:

3080   insert_mem_bar(Op_MemBarCPUOrder);
3081   switch(id) {
3082     case vmIntrinsics::_loadFence:
3083       insert_mem_bar(Op_LoadFence);
3084       return true;
3085     case vmIntrinsics::_storeFence:
3086       insert_mem_bar(Op_StoreFence);
3087       return true;
3088     case vmIntrinsics::_storeStoreFence:
3089       insert_mem_bar(Op_StoreStoreFence);
3090       return true;
3091     case vmIntrinsics::_fullFence:
3092       insert_mem_bar(Op_MemBarFull);
3093       return true;
3094     default:
3095       fatal_unexpected_iid(id);
3096       return false;
3097   }
3098 }
3099 
3100 // private native int arrayInstanceBaseOffset0(Object[] array);
3101 bool LibraryCallKit::inline_arrayInstanceBaseOffset() {
3102   Node* array = argument(1);
3103   Node* klass_node = load_object_klass(array);
3104 
3105   jint  layout_con = Klass::_lh_neutral_value;
3106   Node* layout_val = get_layout_helper(klass_node, layout_con);
3107   int   layout_is_con = (layout_val == nullptr);
3108 
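       // The first-element offset equals the header size encoded in the klass layout
       // helper: read it directly for a constant klass, otherwise shift/mask it out of
       // the loaded layout helper word.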
3109   Node* header_size = nullptr;
3110   if (layout_is_con) {
3111     int hsize = Klass::layout_helper_header_size(layout_con);
3112     header_size = intcon(hsize);
3113   } else {
3114     Node* hss = intcon(Klass::_lh_header_size_shift);
3115     Node* hsm = intcon(Klass::_lh_header_size_mask);
3116     header_size = _gvn.transform(new URShiftINode(layout_val, hss));
3117     header_size = _gvn.transform(new AndINode(header_size, hsm));
3118   }
3119   set_result(header_size);
3120   return true;
3121 }
3122 
3123 // private native int arrayInstanceIndexScale0(Object[] array);
3124 bool LibraryCallKit::inline_arrayInstanceIndexScale() {
3125   Node* array = argument(1);
3126   Node* klass_node = load_object_klass(array);
3127 
3128   jint  layout_con = Klass::_lh_neutral_value;
3129   Node* layout_val = get_layout_helper(klass_node, layout_con);
3130   int   layout_is_con = (layout_val == nullptr);
3131 
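       // The layout helper stores the element size as log2: use the constant if known,
       // otherwise compute 1 << log2_element_size at runtime.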
3132   Node* element_size = nullptr;
3133   if (layout_is_con) {
3134     int log_element_size  = Klass::layout_helper_log2_element_size(layout_con);
3135     int elem_size = 1 << log_element_size;
3136     element_size = intcon(elem_size);
3137   } else {
3138     Node* ess = intcon(Klass::_lh_log2_element_size_shift);
3139     Node* esm = intcon(Klass::_lh_log2_element_size_mask);
3140     Node* log_element_size = _gvn.transform(new URShiftINode(layout_val, ess));
3141     log_element_size = _gvn.transform(new AndINode(log_element_size, esm));
3142     element_size = _gvn.transform(new LShiftINode(intcon(1), log_element_size));
3143   }
3144   set_result(element_size);
3145   return true;
3146 }
3147 
3148 // private native int arrayLayout0(Object[] array);
3149 bool LibraryCallKit::inline_arrayLayout() {
3150   RegionNode* region = new RegionNode(2);
3151   Node* phi = new PhiNode(region, TypeInt::POS);
3152 
3153   Node* array = argument(1);
3154   Node* klass_node = load_object_klass(array);
3155   generate_refArray_guard(klass_node, region);
3156   if (region->req() == 3) {
3157     phi->add_req(intcon((jint)LayoutKind::REFERENCE));
3158   }
3159 
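       // Fall-through means this is not a reference array; assuming it is therefore a
       // flat array, its LayoutKind can be loaded straight from the FlatArrayKlass.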
3160   int layout_kind_offset = in_bytes(FlatArrayKlass::layout_kind_offset());
3161   Node* layout_kind_addr = basic_plus_adr(top(), klass_node, layout_kind_offset);
3162   Node* layout_kind = make_load(nullptr, layout_kind_addr, TypeInt::POS, T_INT, MemNode::unordered);
3163 
3164   region->init_req(1, control());
3165   phi->init_req(1, layout_kind);
3166 
3167   set_control(_gvn.transform(region));
3168   set_result(_gvn.transform(phi));
3169   return true;
3170 }
3171 
3172 // private native int[] getFieldMap0(Class<?> c);
3173 //   int offset = c._klass._acmp_maps_offset;
3174 //   return (int[])c.obj_field(offset);
3175 bool LibraryCallKit::inline_getFieldMap() {
3176   Node* mirror = argument(1);
3177   Node* klass = load_klass_from_mirror(mirror, false, nullptr, 0);
3178 
3179   int field_map_offset_offset = in_bytes(InstanceKlass::acmp_maps_offset_offset());
3180   Node* field_map_offset_addr = basic_plus_adr(top(), klass, field_map_offset_offset);
3181   Node* field_map_offset = make_load(nullptr, field_map_offset_addr, TypeInt::INT, T_INT, MemNode::unordered);
3182   field_map_offset = _gvn.transform(ConvI2L(field_map_offset));
3183 
3184   Node* map_addr = basic_plus_adr(mirror, field_map_offset);
3185   const TypeAryPtr* val_type = TypeAryPtr::INTS->cast_to_ptr_type(TypePtr::NotNull)->with_offset(0);
3186   // TODO 8350865 Remove this
3187   val_type = val_type->cast_to_not_flat(true)->cast_to_not_null_free(true);
3188   Node* map = access_load_at(mirror, map_addr, TypeAryPtr::INTS, val_type, T_ARRAY, IN_HEAP | MO_UNORDERED);
3189 
3190   set_result(map);
3191   return true;
3192 }
3193 
3194 bool LibraryCallKit::inline_onspinwait() {
3195   insert_mem_bar(Op_OnSpinWait);
3196   return true;
3197 }
3198 
3199 bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
3200   if (!kls->is_Con()) {
3201     return true;
3202   }
3203   const TypeInstKlassPtr* klsptr = kls->bottom_type()->isa_instklassptr();
3204   if (klsptr == nullptr) {
3205     return true;
3206   }
3207   ciInstanceKlass* ik = klsptr->instance_klass();
3208   // don't need a guard for a klass that is already initialized
3209   return !ik->is_initialized();
3210 }
3211 
3212 //----------------------------inline_unsafe_writeback0-------------------------
3213 // public native void Unsafe.writeback0(long address)

3292                     Deoptimization::Action_make_not_entrant);
3293     }
3294     if (stopped()) {
3295       return true;
3296     }
3297 #endif //INCLUDE_JVMTI
3298 
3299   Node* test = nullptr;
3300   if (LibraryCallKit::klass_needs_init_guard(kls)) {
3301     // Note:  The argument might still be an illegal value like
3302     // Serializable.class or Object[].class.   The runtime will handle it.
3303     // But we must make an explicit check for initialization.
3304     Node* insp = off_heap_plus_addr(kls, in_bytes(InstanceKlass::init_state_offset()));
3305     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3306     // can generate code to load it as an unsigned byte.
3307     Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
3308     Node* bits = intcon(InstanceKlass::fully_initialized);
3309     test = _gvn.transform(new SubINode(inst, bits));
3310     // The 'test' is non-zero if we need to take a slow path.
3311   }

3312   Node* obj = new_instance(kls, test);
3313   set_result(obj);
3314   return true;
3315 }
3316 
3317 //------------------------inline_native_time_funcs--------------
3318 // inline code for System.currentTimeMillis() and System.nanoTime()
3319 // these have the same type and signature
3320 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3321   const TypeFunc* tf = OptoRuntime::void_long_Type();
3322   const TypePtr* no_memory_effects = nullptr;
3323   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3324   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3325 #ifdef ASSERT
3326   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3327   assert(value_top == top(), "second value must be top");
3328 #endif
3329   set_result(value);
3330   return true;
3331 }

4107   Node* arr = argument(1);
4108   Node* thread = _gvn.transform(new ThreadLocalNode());
4109   Node* p = off_heap_plus_addr(thread, in_bytes(JavaThread::vthread_offset()));
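       // JavaThread::_vthread is an OopHandle: load the handle's oop slot and store the
       // new thread oop through it as a native (off-heap) reference.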
4110   Node* thread_obj_handle
4111     = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
4112   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
4113   access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
4114 
4115   // Change the _monitor_owner_id of the JavaThread
4116   Node* tid = load_field_from_object(arr, "tid", "J");
4117   Node* monitor_owner_id_offset = off_heap_plus_addr(thread, in_bytes(JavaThread::monitor_owner_id_offset()));
4118   store_to_memory(control(), monitor_owner_id_offset, tid, T_LONG, MemNode::unordered, true);
4119 
4120   JFR_ONLY(extend_setCurrentThread(thread, arr);)
4121   return true;
4122 }
4123 
4124 const Type* LibraryCallKit::scopedValueCache_type() {
4125   ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
4126   const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
4127   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS, /* stable= */ false, /* flat= */ false, /* not_flat= */ true, /* not_null_free= */ true, true);
4128 
4129   // Because we create the scopedValue cache lazily we have to make the
4130   // type of the result BotPTR.
4131   bool xk = etype->klass_is_exact();
4132   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, TypeAryPtr::Offset(0));
4133   return objects_type;
4134 }
4135 
4136 Node* LibraryCallKit::scopedValueCache_helper() {
4137   Node* thread = _gvn.transform(new ThreadLocalNode());
4138   Node* p = off_heap_plus_addr(thread, in_bytes(JavaThread::scopedValueCache_offset()));
4139   // We cannot use immutable_memory() because we might flip onto a
4140   // different carrier thread, at which point we'll need to use that
4141   // carrier thread's cache.
4142   // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
4143   //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
4144   return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
4145 }
4146 
4147 //------------------------inline_native_scopedValueCache------------------
4148 bool LibraryCallKit::inline_native_scopedValueCache() {
4149   Node* cache_obj_handle = scopedValueCache_helper();
4150   const Type* objects_type = scopedValueCache_type();
4151   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
4152 

4288   }
4289   return kls;
4290 }
4291 
4292 //--------------------(inline_native_Class_query helpers)---------------------
4293 // Use this for JVM_ACC_INTERFACE.
4294 // Fall through if (mods & mask) == bits, take the guard otherwise.
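     // For example, generate_interface_guard below passes mask = JVM_ACC_INTERFACE and
     // bits = 0, so the guard path is taken exactly when the interface bit is set.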
4295 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
4296                                                  ByteSize offset, const Type* type, BasicType bt) {
4297   // Branch around if the given klass has the given modifier bit set.
4298   // Like generate_guard, adds a new path onto the region.
4299   Node* modp = off_heap_plus_addr(kls, in_bytes(offset));
4300   Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
4301   Node* mask = intcon(modifier_mask);
4302   Node* bits = intcon(modifier_bits);
4303   Node* mbit = _gvn.transform(new AndINode(mods, mask));
4304   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
4305   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
4306   return generate_fair_guard(bol, region);
4307 }
4308 
4309 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
4310   return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
4311                                     InstanceKlass::access_flags_offset(), TypeInt::CHAR, T_CHAR);
4312 }
4313 
4314 // Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
4315 Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
4316   return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
4317                                     Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
4318 }
4319 
4320 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
4321   return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
4322 }
4323 
4324 //-------------------------inline_native_Class_query-------------------
4325 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
4326   const Type* return_type = TypeInt::BOOL;
4327   Node* prim_return_value = top();  // what happens if it's a primitive class?
4328   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);

4414 
4415 
4416   case vmIntrinsics::_getSuperclass:
4417     // The rules here are somewhat unfortunate, but we can still do better
4418     // with random logic than with a JNI call.
4419     // Interfaces store null or Object as _super, but must report null.
4420     // Arrays store an intermediate super as _super, but must report Object.
4421     // Other types can report the actual _super.
4422     // (To verify this code sequence, check the asserts in JVM_IsInterface.)
4423     if (generate_array_guard(kls, region) != nullptr) {
4424       // A guard was added.  If the guard is taken, it was an array.
4425       phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
4426     }
4427     // Check for interface after array since this checks AccessFlags offset into InstanceKlass.
4428     // In other words, we are accessing subtype-specific information, so we need to determine the subtype first.
4429     if (generate_interface_guard(kls, region) != nullptr) {
4430       // A guard was added.  If the guard is taken, it was an interface.
4431       phi->add_req(null());
4432     }
4433     // If we fall through, it's a plain class.  Get its _super.









4434     if (!stopped()) {
4435       p = basic_plus_adr(top(), kls, in_bytes(Klass::super_offset()));
4436       kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
4437       null_ctl = top();
4438       kls = null_check_oop(kls, &null_ctl);
4439       if (null_ctl != top()) {
4440         // If the guard is taken, Object.superClass is null (both klass and mirror).
4441         region->add_req(null_ctl);
4442         phi   ->add_req(null());
4443       }
4444       if (!stopped()) {
4445         query_value = load_mirror_from_klass(kls);
4446       }
4447     }
4448     break;
4449 
4450   default:
4451     fatal_unexpected_iid(id);
4452     break;
4453   }
4454 
4455   // Fall-through is the normal case of a query to a real class.
4456   phi->init_req(1, query_value);
4457   region->init_req(1, control());
4458 
4459   C->set_has_split_ifs(true); // Has chance for split-if optimization
4460   set_result(region, phi);
4461   return true;
4462 }
4463 
4464 
4465 //-------------------------inline_Class_cast-------------------
4466 bool LibraryCallKit::inline_Class_cast() {
4467   Node* mirror = argument(0); // Class
4468   Node* obj    = argument(1);
4469   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4470   if (mirror_con == nullptr) {
4471     return false;  // dead path (mirror->is_top()).
4472   }
4473   if (obj == nullptr || obj->is_top()) {
4474     return false;  // dead path
4475   }
4476   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4477 
4478   // First, see if Class.cast() can be folded statically.
4479   // java_mirror_type() returns non-null for compile-time Class constants.
4480   ciType* tm = mirror_con->java_mirror_type();
4481   if (tm != nullptr && tm->is_klass() &&
4482       tp != nullptr) {
4483     if (!tp->is_loaded()) {
4484       // Don't use intrinsic when class is not loaded.
4485       return false;
4486     } else {
4487       const TypeKlassPtr* tklass = TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces);
4488       int static_res = C->static_subtype_check(tklass, tp->as_klass_type());
4489       if (static_res == Compile::SSC_always_true) {
4490         // isInstance() is true - fold the code.
4491         set_result(obj);
4492         return true;
4493       } else if (static_res == Compile::SSC_always_false) {
4494         // Don't use intrinsic, have to throw ClassCastException.
4495         // If the reference is null, the non-intrinsic bytecode will
4496         // be optimized appropriately.
4497         return false;
4498       }
4499     }
4500   }
4501 
4502   // Bailout intrinsic and do normal inlining if exception path is frequent.
4503   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4504     return false;
4505   }
4506 
4507   // Generate dynamic checks.
4508   // Class.cast() is java implementation of _checkcast bytecode.
4509   // Do checkcast (Parse::do_checkcast()) optimizations here.
4510 
4511   mirror = null_check(mirror);
4512   // If mirror is dead, only null-path is taken.
4513   if (stopped()) {
4514     return true;
4515   }
4516 
4517   // Not-subtype or the mirror's klass ptr is nullptr (in case it is a primitive).
4518   enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
4519   RegionNode* region = new RegionNode(PATH_LIMIT);
4520   record_for_igvn(region);
4521 
4522   // Now load the mirror's klass metaobject, and null-check it.
4523   // If kls is null, we have a primitive mirror and
4524   // nothing is an instance of a primitive type.
4525   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4526 
4527   Node* res = top();
4528   Node* io = i_o();
4529   Node* mem = merged_memory();
4530   SafePointNode* new_cast_failure_map = nullptr;
4531 
4532   if (!stopped()) {
4533 
4534     Node* bad_type_ctrl = top();
4535     // Do checkcast optimizations.
4536     res = gen_checkcast(obj, kls, &bad_type_ctrl, &new_cast_failure_map);
4537     region->init_req(_bad_type_path, bad_type_ctrl);
4538   }
4539   if (region->in(_prim_path) != top() ||
4540       region->in(_bad_type_path) != top() ||
4541       region->in(_npe_path) != top()) {
4542     // Let Interpreter throw ClassCastException.
4543     PreserveJVMState pjvms(this);
4544     if (new_cast_failure_map != nullptr) {
4545       // The current map on the success path could have been modified. Use the dedicated failure path map.
4546       set_map(new_cast_failure_map);
4547     }
4548     set_control(_gvn.transform(region));
4549     // Set IO and memory because gen_checkcast may override them when buffering inline types
4550     set_i_o(io);
4551     set_all_memory(mem);
4552     uncommon_trap(Deoptimization::Reason_intrinsic,
4553                   Deoptimization::Action_maybe_recompile);
4554   }
4555   if (!stopped()) {
4556     set_result(res);
4557   }
4558   return true;
4559 }
4560 
4561 
4562 //--------------------------inline_native_subtype_check------------------------
4563 // This intrinsic takes the JNI calls out of the heart of
4564 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4565 bool LibraryCallKit::inline_native_subtype_check() {
4566   // Pull both arguments off the stack.
4567   Node* args[2];                // two java.lang.Class mirrors: superc, subc
4568   args[0] = argument(0);
4569   args[1] = argument(1);
4570   Node* klasses[2];             // corresponding Klasses: superk, subk
4571   klasses[0] = klasses[1] = top();
4572 
4573   enum {
4574     // A full decision tree on {superc is prim, subc is prim}:
4575     _prim_0_path = 1,           // {P,N} => false
4576                                 // {P,P} & superc!=subc => false
4577     _prim_same_path,            // {P,P} & superc==subc => true
4578     _prim_1_path,               // {N,P} => false
4579     _ref_subtype_path,          // {N,N} & subtype check wins => true
4580     _both_ref_path,             // {N,N} & subtype check loses => false
4581     PATH_LIMIT
4582   };
4583 
4584   RegionNode* region = new RegionNode(PATH_LIMIT);
4585   RegionNode* prim_region = new RegionNode(2);
4586   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
4587   record_for_igvn(region);
4588   record_for_igvn(prim_region);
4589 
4590   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
4591   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4592   int class_klass_offset = java_lang_Class::klass_offset();
4593 
4594   // First null-check both mirrors and load each mirror's klass metaobject.
4595   int which_arg;
4596   for (which_arg = 0; which_arg <= 1; which_arg++) {
4597     Node* arg = args[which_arg];
4598     arg = null_check(arg);
4599     if (stopped())  break;
4600     args[which_arg] = arg;
4601 
4602     Node* p = basic_plus_adr(arg, class_klass_offset);
4603     Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
4604     klasses[which_arg] = _gvn.transform(kls);
4605   }
4606 
4607   // Having loaded both klasses, test each for null.
4608   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4609   for (which_arg = 0; which_arg <= 1; which_arg++) {
4610     Node* kls = klasses[which_arg];
4611     Node* null_ctl = top();
4612     kls = null_check_oop(kls, &null_ctl, never_see_null);
4613     if (which_arg == 0) {
4614       prim_region->init_req(1, null_ctl);
4615     } else {
4616       region->init_req(_prim_1_path, null_ctl);
4617     }
4618     if (stopped())  break;
4619     klasses[which_arg] = kls;
4620   }
4621 
4622   if (!stopped()) {
4623     // now we have two reference types, in klasses[0..1]
4624     Node* subk   = klasses[1];  // the argument to isAssignableFrom
4625     Node* superk = klasses[0];  // the receiver
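         // gen_subtype_check returns the not-a-subtype control path; control() afterwards
         // is the success path, so the two region slots below capture both outcomes.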
4626     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));

4627     region->set_req(_ref_subtype_path, control());
4628   }
4629 
4630   // If both operands are primitive (both klasses null), then
4631   // we must return true when they are identical primitives.
4632   // It is convenient to test this after the first null klass check.
4633   // This path is also used if superc is a value mirror.
4634   set_control(_gvn.transform(prim_region));
4635   if (!stopped()) {
4636     // Since superc is primitive, make a guard for the superc==subc case.
4637     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4638     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4639     generate_fair_guard(bol_eq, region);
4640     if (region->req() == PATH_LIMIT+1) {
4641       // A guard was added.  If the added guard is taken, superc==subc.
4642       region->swap_edges(PATH_LIMIT, _prim_same_path);
4643       region->del_req(PATH_LIMIT);
4644     }
4645     region->set_req(_prim_0_path, control()); // Not equal after all.
4646   }
4647 
4648   // these are the only paths that produce 'true':
4649   phi->set_req(_prim_same_path,   intcon(1));
4650   phi->set_req(_ref_subtype_path, intcon(1));
4651 
4652   // pull together the cases:
4653   assert(region->req() == PATH_LIMIT, "sane region");
4654   for (uint i = 1; i < region->req(); i++) {
4655     Node* ctl = region->in(i);
4656     if (ctl == nullptr || ctl == top()) {
4657       region->set_req(i, top());
4658       phi   ->set_req(i, top());
4659     } else if (phi->in(i) == nullptr) {
4660       phi->set_req(i, intcon(0)); // all other paths produce 'false'
4661     }
4662   }
4663 
4664   set_control(_gvn.transform(region));
4665   set_result(_gvn.transform(phi));
4666   return true;
4667 }
4668 
4669 //---------------------generate_array_guard_common------------------------
4670 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind, Node** obj) {

4671 
4672   if (stopped()) {
4673     return nullptr;
4674   }
4675 









4676   // Like generate_guard, adds a new path onto the region.
4677   jint  layout_con = 0;
4678   Node* layout_val = get_layout_helper(kls, layout_con);
4679   if (layout_val == nullptr) {
4680     bool query = false;
4681     switch(kind) {
4682       case RefArray:       query = Klass::layout_helper_is_refArray(layout_con); break;
4683       case NonRefArray:    query = !Klass::layout_helper_is_refArray(layout_con); break;
4684       case TypeArray:      query = Klass::layout_helper_is_typeArray(layout_con); break;
4685       case AnyArray:       query = Klass::layout_helper_is_array(layout_con); break;
4686       case NonArray:       query = !Klass::layout_helper_is_array(layout_con); break;
4687       default:
4688         ShouldNotReachHere();
4689     }
4690     if (!query) {
4691       return nullptr;                       // never a branch
4692     } else {                             // always a branch
4693       Node* always_branch = control();
4694       if (region != nullptr)
4695         region->add_req(always_branch);
4696       set_control(top());
4697       return always_branch;
4698     }
4699   }
4700   unsigned int value = 0;
4701   BoolTest::mask btest = BoolTest::illegal;
4702   switch(kind) {
4703     case RefArray:
4704     case NonRefArray: {
4705       value = Klass::_lh_array_tag_ref_value;
4706       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4707       btest = (kind == RefArray) ? BoolTest::eq : BoolTest::ne;
4708       break;
4709     }
4710     case TypeArray: {
4711       value = Klass::_lh_array_tag_type_value;
4712       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4713       btest = BoolTest::eq;
4714       break;
4715     }
4716     case AnyArray:    value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
4717     case NonArray:    value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
4718     default:
4719       ShouldNotReachHere();
4720   }
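       // Array layout helpers are negative (the array tag is in the high bits) while
       // instance layout helpers are positive, so the AnyArray/NonArray cases reduce
       // to a signed compare against the neutral value below.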
4721   // Now test the correct condition.
4722   jint nval = (jint)value;



4723   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));



4724   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4725   Node* ctrl = generate_fair_guard(bol, region);
4726   Node* is_array_ctrl = kind == NonArray ? control() : ctrl;
4727   if (obj != nullptr && is_array_ctrl != nullptr && is_array_ctrl != top()) {
4728     // Keep track of the fact that 'obj' is an array to prevent
4729     // array specific accesses from floating above the guard.
4730     *obj = _gvn.transform(new CheckCastPPNode(is_array_ctrl, *obj, TypeAryPtr::BOTTOM));
4731   }
4732   return ctrl;
4733 }
4734 
4735 // public static native Object[] ValueClass::newNullRestrictedAtomicArray(Class<?> componentType, int length, Object initVal);
4736 // public static native Object[] ValueClass::newNullRestrictedNonAtomicArray(Class<?> componentType, int length, Object initVal);
4737 // public static native Object[] ValueClass::newNullableAtomicArray(Class<?> componentType, int length);
4738 bool LibraryCallKit::inline_newArray(bool null_free, bool atomic) {
4739   assert(null_free || atomic, "nullable implies atomic");
4740   Node* componentType = argument(0);
4741   Node* length = argument(1);
4742   Node* init_val = null_free ? argument(2) : nullptr;
4743 
4744   const TypeInstPtr* tp = _gvn.type(componentType)->isa_instptr();
4745   if (tp != nullptr) {
4746     ciInstanceKlass* ik = tp->instance_klass();
4747     if (ik == C->env()->Class_klass()) {
4748       ciType* t = tp->java_mirror_type();
4749       if (t != nullptr && t->is_inlinetype()) {
4750 
4751         ciArrayKlass* array_klass = ciArrayKlass::make(t, null_free, atomic, true);
4752         assert(array_klass->is_elem_null_free() == null_free, "inconsistency");
4753 
4754         // TODO 8350865 ZGC needs card marks on initializing oop stores
4755         if (UseZGC && null_free && !array_klass->is_flat_array_klass()) {
4756           return false;
4757         }
4758 
4759         if (array_klass->is_loaded() && array_klass->element_klass()->as_inline_klass()->is_initialized()) {
4760           const TypeAryKlassPtr* array_klass_type = TypeAryKlassPtr::make(array_klass, Type::trust_interfaces);
4761           if (null_free) {
4762             if (init_val->is_InlineType()) {
4763               if (array_klass_type->is_flat() && init_val->as_InlineType()->is_all_zero(&gvn(), /* flat */ true)) {
4764                 // Zeroing is enough because the init value is the all-zero value
4765                 init_val = nullptr;
4766               } else {
4767                 init_val = init_val->as_InlineType()->buffer(this);
4768               }
4769             }
4770             // TODO 8350865 Should we add a check of the init_val type (maybe in debug only + halt)?
4771             // If we insert a checkcast here, we can be sure that init_val is an InlineTypeNode, so
4772             // when we folded a field load from an allocation (e.g. during escape analysis), we can
4773             // remove the check init_val->is_InlineType().
4774           }
4775           Node* obj = new_array(makecon(array_klass_type), length, 0, nullptr, false, init_val);
4776           const TypeAryPtr* arytype = gvn().type(obj)->is_aryptr();
4777           assert(arytype->is_null_free() == null_free, "inconsistency");
4778           assert(arytype->is_not_null_free() == !null_free, "inconsistency");
4779           set_result(obj);
4780           return true;
4781         }
4782       }
4783     }
4784   }
4785   return false;
4786 }
4787 
4788 // public static native boolean ValueClass::isFlatArray(Object array);
4789 // public static native boolean ValueClass::isNullRestrictedArray(Object array);
4790 // public static native boolean ValueClass::isAtomicArray(Object array);
4791 bool LibraryCallKit::inline_getArrayProperties(ArrayPropertiesCheck check) {
4792   Node* array = argument(0);
4793 
4794   Node* bol;
4795   switch(check) {
4796     case IsFlat:
4797       // TODO 8350865 Use the object version here instead of loading the klass
4798       // The problem is that PhaseMacroExpand::expand_flatarraycheck_node can only handle some IR shapes and will fail, for example, if the bol is directly wired to a ReturnNode
4799       bol = flat_array_test(load_object_klass(array));
4800       break;
4801     case IsNullRestricted:
4802       bol = null_free_array_test(array);
4803       break;
4804     case IsAtomic:
4805       // TODO 8350865 Implement this. It's a bit more complicated, see conditions in JVM_IsAtomicArray
4806       // Enable TestIntrinsics::test87/88 once this is implemented
4807       // bol = null_free_atomic_array_test
4808       return false;
4809     default:
4810       ShouldNotReachHere();
4811   }
4812 
4813   Node* res = gvn().transform(new CMoveINode(bol, intcon(0), intcon(1), TypeInt::BOOL));
4814   set_result(res);
4815   return true;
4816 }
4817 
4818 // Load the default refined array klass from an ObjArrayKlass. This relies on the first entry in the
4819 // '_next_refined_array_klass' linked list being the default (see ObjArrayKlass::klass_with_properties).
4820 Node* LibraryCallKit::load_default_refined_array_klass(Node* klass_node, bool type_array_guard) {
4821   RegionNode* region = new RegionNode(2);
4822   Node* phi = new PhiNode(region, TypeInstKlassPtr::OBJECT_OR_NULL);
4823 
4824   if (type_array_guard) {
4825     generate_typeArray_guard(klass_node, region);
4826     if (region->req() == 3) {
4827       phi->add_req(klass_node);
4828     }
4829   }
4830   Node* adr_refined_klass = basic_plus_adr(top(), klass_node, in_bytes(ObjArrayKlass::next_refined_array_klass_offset()));
4831   Node* refined_klass = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), adr_refined_klass, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
4832 
4833   // Can be null if not initialized yet, just deopt
4834   Node* null_ctl = top();
4835   refined_klass = null_check_oop(refined_klass, &null_ctl, /* never_see_null= */ true);
4836 
4837   region->init_req(1, control());
4838   phi->init_req(1, refined_klass);
4839 
4840   set_control(_gvn.transform(region));
4841   return _gvn.transform(phi);
4842 }
4843 
4844 // Load the non-refined array klass from an ObjArrayKlass.
4845 Node* LibraryCallKit::load_non_refined_array_klass(Node* klass_node) {
4846   const TypeAryKlassPtr* ary_klass_ptr = _gvn.type(klass_node)->isa_aryklassptr();
4847   if (ary_klass_ptr != nullptr && ary_klass_ptr->klass_is_exact()) {
4848     return _gvn.makecon(ary_klass_ptr->cast_to_refined_array_klass_ptr(false));
4849   }
4850 
4851   RegionNode* region = new RegionNode(2);
4852   Node* phi = new PhiNode(region, TypeInstKlassPtr::OBJECT);
4853 
4854   generate_typeArray_guard(klass_node, region);
4855   if (region->req() == 3) {
4856     phi->add_req(klass_node);
4857   }
4858   Node* super_adr = basic_plus_adr(top(), klass_node, in_bytes(Klass::super_offset()));
4859   Node* super_klass = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), super_adr, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT));
4860 
4861   region->init_req(1, control());
4862   phi->init_req(1, super_klass);
4863 
4864   set_control(_gvn.transform(region));
4865   return _gvn.transform(phi);
4866 }
4867 
4868 //-----------------------inline_native_newArray--------------------------
4869 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4870 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4871 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4872   Node* mirror;
4873   Node* count_val;
4874   if (uninitialized) {
4875     null_check_receiver();
4876     mirror    = argument(1);
4877     count_val = argument(2);
4878   } else {
4879     mirror    = argument(0);
4880     count_val = argument(1);
4881   }
4882 
4883   mirror = null_check(mirror);
4884   // If mirror or obj is dead, only null-path is taken.
4885   if (stopped())  return true;
4886 
4887   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4888   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4889   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);

4907     CallJavaNode* slow_call = nullptr;
4908     if (uninitialized) {
4909       // Generate optimized virtual call (holder class 'Unsafe' is final)
4910       slow_call = generate_method_call(vmIntrinsics::_allocateUninitializedArray, false, false, true);
4911     } else {
4912       slow_call = generate_method_call_static(vmIntrinsics::_newArray, true);
4913     }
4914     Node* slow_result = set_results_for_java_call(slow_call);
4915     // this->control() comes from set_results_for_java_call
4916     result_reg->set_req(_slow_path, control());
4917     result_val->set_req(_slow_path, slow_result);
4918     result_io ->set_req(_slow_path, i_o());
4919     result_mem->set_req(_slow_path, reset_memory());
4920   }
4921 
4922   set_control(normal_ctl);
4923   if (!stopped()) {
4924     // Normal case:  The array type has been cached in the java.lang.Class.
4925     // The following call works fine even if the array type is polymorphic.
4926     // It could be a dynamic mix of int[], boolean[], Object[], etc.
4927 
4928     klass_node = load_default_refined_array_klass(klass_node);
4929 
4930     Node* obj = new_array(klass_node, count_val, 0);  // no arguments to push
4931     result_reg->init_req(_normal_path, control());
4932     result_val->init_req(_normal_path, obj);
4933     result_io ->init_req(_normal_path, i_o());
4934     result_mem->init_req(_normal_path, reset_memory());
4935 
4936     if (uninitialized) {
4937       // Mark the allocation so that zeroing is skipped
4938       AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(obj);
4939       alloc->maybe_set_complete(&_gvn);
4940     }
4941   }
4942 
4943   // Return the combined state.
4944   set_i_o(        _gvn.transform(result_io)  );
4945   set_all_memory( _gvn.transform(result_mem));
4946 
4947   C->set_has_split_ifs(true); // Has chance for split-if optimization
4948   set_result(result_reg, result_val);
4949   return true;

4998   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4999   { PreserveReexecuteState preexecs(this);
5000     jvms()->set_should_reexecute(true);
5001 
5002     array_type_mirror = null_check(array_type_mirror);
5003     original          = null_check(original);
5004 
5005     // Check if a null path was taken unconditionally.
5006     if (stopped())  return true;
5007 
5008     Node* orig_length = load_array_length(original);
5009 
5010     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
5011     klass_node = null_check(klass_node);
5012 
5013     RegionNode* bailout = new RegionNode(1);
5014     record_for_igvn(bailout);
5015 
5016     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
5017     // Bail out if that is so.
5018     // Inline type array may have object field that would require a
5019     // write barrier. Conservatively, go to slow path.
5020     // TODO 8251971: Optimize for the case when flat src/dst are later found
5021     // to not contain oops (i.e., move this check to the macro expansion phase).
5022     // TODO 8382226: Revisit for flat abstract value class arrays
5023     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5024     const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
5025     const TypeKlassPtr* tklass = _gvn.type(klass_node)->is_klassptr();
5026     bool exclude_flat = UseArrayFlattening && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
5027                         // Can src array be flat and contain oops?
5028                         (orig_t == nullptr || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
5029                         // Can dest array be flat and contain oops?
5030                         tklass->can_be_inline_array() && (!tklass->is_flat() || tklass->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->as_inline_klass()->contains_oops());
5031     Node* not_objArray = exclude_flat ? generate_non_refArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
5032 
5033     Node* refined_klass_node = load_default_refined_array_klass(klass_node, /* type_array_guard= */ false);
5034 
5035     if (not_objArray != nullptr) {
5036       // Improve the klass node's type from the new optimistic assumption:
5037       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
5038       bool not_flat = !UseArrayFlattening;
5039       bool not_null_free = !Arguments::is_valhalla_enabled();
5040       const Type* akls = TypeAryKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0), Type::trust_interfaces, not_flat, not_null_free, false, false, not_flat, true);
5041       Node* cast = new CastPPNode(control(), refined_klass_node, akls);
5042       refined_klass_node = _gvn.transform(cast);
5043     }
5044 
5045     // Bail out if either start or end is negative.
5046     generate_negative_guard(start, bailout, &start);
5047     generate_negative_guard(end,   bailout, &end);
5048 
5049     Node* length = end;
5050     if (_gvn.type(start) != TypeInt::ZERO) {
5051       length = _gvn.transform(new SubINode(end, start));
5052     }
5053 
5054     // Bail out if length is negative (i.e., if start > end).
5055     // Without this the new_array would throw
5056     // NegativeArraySizeException but IllegalArgumentException is what
5057     // should be thrown
5058     generate_negative_guard(length, bailout, &length);
5059 
5060     // Handle inline type arrays
5061     bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
5062     if (!stopped()) {
5063       // TODO 8251971
5064       if (!orig_t->is_null_free()) {
5065         // Not statically known to be null free, add a check
5066         generate_fair_guard(null_free_array_test(original), bailout);
5067       }
5068       orig_t = _gvn.type(original)->isa_aryptr();
5069       if (orig_t != nullptr && orig_t->is_flat()) {
5070         // Src is flat, check that dest is flat as well
5071         if (exclude_flat) {
5072           // Dest can't be flat, bail out
5073           bailout->add_req(control());
5074           set_control(top());
5075         } else {
5076           generate_fair_guard(flat_array_test(refined_klass_node, /* flat = */ false), bailout);
5077         }
5078         // TODO 8251971 This is not correct anymore. Write tests and fix logic similar to arraycopy.
5079       } else if (UseArrayFlattening && (orig_t == nullptr || !orig_t->is_not_flat()) &&
5080                  // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
5081                  ((!tklass->is_flat() && tklass->can_be_inline_array()) || !can_validate)) {
5082         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
5083         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
5084         generate_fair_guard(flat_array_test(load_object_klass(original)), bailout);
5085         if (orig_t != nullptr) {
5086           orig_t = orig_t->cast_to_not_flat();
5087           original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
5088         }
5089       }
5090       if (!can_validate) {
5091         // No validation. The subtype check emitted at macro expansion time will not go to the slow
5092         // path but call checkcast_arraycopy which can not handle flat/null-free inline type arrays.
5093         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
5094         generate_fair_guard(flat_array_test(refined_klass_node), bailout);
5095         generate_fair_guard(null_free_array_test(original), bailout);
5096       }
5097     }
5098 
5099     // Bail out if start is larger than the original length
5100     Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
5101     generate_negative_guard(orig_tail, bailout, &orig_tail);
5102 
5103     if (bailout->req() > 1) {
5104       PreserveJVMState pjvms(this);
5105       set_control(_gvn.transform(bailout));
5106       uncommon_trap(Deoptimization::Reason_intrinsic,
5107                     Deoptimization::Action_maybe_recompile);
5108     }
5109 
5110     if (!stopped()) {
5111       // How many elements will we copy from the original?
5112       // The answer is MinI(orig_tail, length).
5113       Node* moved = _gvn.transform(new MinINode(orig_tail, length));
5114 
5115       // Generate a direct call to the right arraycopy function(s).
5116       // We know the copy is disjoint but we might not know if the
5117       // oop stores need checking.
5118       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).

5124       // to the copyOf to be validated, including that the copy to the
5125       // new array won't trigger an ArrayStoreException. That subtype
5126       // check can be optimized if we know something on the type of
5127       // the input array from type speculation.
5128       if (_gvn.type(klass_node)->singleton()) {
5129         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
5130         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
5131 
5132         int test = C->static_subtype_check(superk, subk);
5133         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
5134           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
5135           if (t_original->speculative_type() != nullptr) {
5136             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
5137           }
5138         }
5139       }
5140 
5141       bool validated = false;
5142       // Reason_class_check rather than Reason_intrinsic because we
5143       // want to intrinsify even if this traps.
5144       if (can_validate) {
5145         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
5146 
5147         if (not_subtype_ctrl != top()) {
5148           PreserveJVMState pjvms(this);
5149           set_control(not_subtype_ctrl);
5150           uncommon_trap(Deoptimization::Reason_class_check,
5151                         Deoptimization::Action_make_not_entrant);
5152           assert(stopped(), "Should be stopped");
5153         }
5154         validated = true;
5155       }
5156 
5157       if (!stopped()) {
5158         newcopy = new_array(refined_klass_node, length, 0);  // no arguments to push
5159 
5160         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
5161                                                 load_object_klass(original), klass_node);
5162         if (!is_copyOfRange) {
5163           ac->set_copyof(validated);
5164         } else {
5165           ac->set_copyofrange(validated);
5166         }
5167         Node* n = _gvn.transform(ac);
5168         if (n == ac) {
5169           ac->connect_outputs(this);
5170         } else {
5171           assert(validated, "shouldn't transform if all arguments not validated");
5172           set_all_memory(n);
5173         }
5174       }
5175     }
5176   } // original reexecute is set back here
5177 
5178   C->set_has_split_ifs(true); // Has chance for split-if optimization

5210 
5211 //-----------------------generate_method_call----------------------------
5212 // Use generate_method_call to make a slow-call to the real
5213 // method if the fast path fails.  An alternative would be to
5214 // use a stub like OptoRuntime::slow_arraycopy_Java.
5215 // This only works for expanding the current library call,
5216 // not another intrinsic.  (E.g., don't use this for making an
5217 // arraycopy call inside the copyOf intrinsic.)
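     // Sketch of a typical use (illustrative; the argument values are assumptions):
     //   CallJavaNode* slow_call =
     //       generate_method_call(vmIntrinsics::_hashCode, /*is_virtual*/ true,
     //                            /*is_static*/ false, /*res_not_null*/ false);
     //   Node* slow_result = set_results_for_java_call(slow_call);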
5218 CallJavaNode*
5219 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
5220   // When compiling the intrinsic method itself, do not use this technique.
5221   guarantee(callee() != C->method(), "cannot make slow-call to self");
5222 
5223   ciMethod* method = callee();
5224   // ensure the JVMS we have will be correct for this call
5225   guarantee(method_id == method->intrinsic_id(), "must match");
5226 
5227   const TypeFunc* tf = TypeFunc::make(method);
5228   if (res_not_null) {
5229     assert(tf->return_type() == T_OBJECT, "");
5230     const TypeTuple* range = tf->range_cc();
5231     const Type** fields = TypeTuple::fields(range->cnt());
5232     fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
5233     const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
5234     tf = TypeFunc::make(tf->domain_cc(), new_range);
5235   }
5236   CallJavaNode* slow_call;
5237   if (is_static) {
5238     assert(!is_virtual, "");
5239     slow_call = new CallStaticJavaNode(C, tf,
5240                            SharedRuntime::get_resolve_static_call_stub(), method);
5241   } else if (is_virtual) {
5242     assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
5243     int vtable_index = Method::invalid_vtable_index;
5244     if (UseInlineCaches) {
5245       // Suppress the vtable call
5246     } else {
5247       // hashCode and clone are not miranda methods,
5248       // so the vtable index is fixed.
5249       // No need to use the linkResolver to get it.
5250        vtable_index = method->vtable_index();
5251        assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
5252               "bad index %d", vtable_index);
5253     }
5254     slow_call = new CallDynamicJavaNode(tf,

5271   set_edges_for_java_call(slow_call);
5272   return slow_call;
5273 }
5274 
5275 
5276 /**
5277  * Build special case code for calls to hashCode on an object. This call may
5278  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
5279  * slightly different code.
5280  */
5281 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
5282   assert(is_static == callee()->is_static(), "correct intrinsic selection");
5283   assert(!(is_virtual && is_static), "either virtual, special, or static");
5284 
5285   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
5286 
5287   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5288   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
5289   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
5290   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5291   Node* obj = argument(0);
5292 
5293   // Don't intrinsify hashcode on inline types for now.
5294   // The "is locked" runtime check also subsumes the inline type check (as inline types cannot be locked) and goes to the slow path.
5295   if (gvn().type(obj)->is_inlinetypeptr()) {
5296     return false;
5297   }
5298 
5299   if (!is_static) {
5300     // Check for hashing null object
5301     obj = null_check_receiver();
5302     if (stopped())  return true;        // unconditionally null
5303     result_reg->init_req(_null_path, top());
5304     result_val->init_req(_null_path, top());
5305   } else {
5306     // Do a null check, and return zero if null.
5307     // System.identityHashCode(null) == 0

5308     Node* null_ctl = top();
5309     obj = null_check_oop(obj, &null_ctl);
5310     result_reg->init_req(_null_path, null_ctl);
5311     result_val->init_req(_null_path, _gvn.intcon(0));
5312   }
5313 
5314   // Unconditionally null?  Then return right away.
5315   if (stopped()) {
5316     set_control( result_reg->in(_null_path));
5317     if (!stopped())
5318       set_result(result_val->in(_null_path));
5319     return true;
5320   }
5321 
5322   // We only go to the fast case code if we pass a number of guards.  The
5323   // paths which do not pass are accumulated in the slow_region.
5324   RegionNode* slow_region = new RegionNode(1);
5325   record_for_igvn(slow_region);
5326 
5327   // If this is a virtual call, we generate a funny guard.  We pull out
5328   // the vtable entry corresponding to hashCode() from the target object.
5329   // If the target method which we are calling happens to be the native
5330   // Object hashCode() method, we pass the guard.  We do not need this
5331   // guard for non-virtual calls -- the caller is known to be the native
5332   // Object hashCode().
5333   if (is_virtual) {
5334     // After null check, get the object's klass.
5335     Node* obj_klass = load_object_klass(obj);
5336     generate_virtual_guard(obj_klass, slow_region);
5337   }
5338 
5339   // Get the header out of the object, use LoadMarkNode when available
5340   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
5341   // The control of the load must be null. Otherwise, the load can move before
5342   // the null check after castPP removal.
5343   Node* no_ctrl = nullptr;
5344   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
5345 
5346   if (!UseObjectMonitorTable) {
5347     // Test the header to see if it is safe to read w.r.t. locking.
5348     // We cannot use the inline type mask as this may check bits that are overridden
5349     // by an object monitor's pointer when the lock is inflated.
5350     Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);
5351     Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
5352     Node *monitor_val   = _gvn.MakeConX(markWord::monitor_value);
5353     Node *chk_monitor   = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
5354     Node *test_monitor  = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
5355 
5356     generate_slow_guard(test_monitor, slow_region);
5357   }
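       // In other words (sketch of the test above): with mark = obj->mark(), we take the slow
       // path when (mark & markWord::lock_mask_in_place) == markWord::monitor_value, i.e. when
       // the object has an inflated monitor whose pointer occupies the header bits.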
5358 
5359   // Get the hash value and check to see that it has been properly assigned.
5360   // We depend on hash_mask being at most 32 bits and avoid the use of
5361   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
5362   // vm: see markWord.hpp.
5363   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
5364   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
5365   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
5366   // This hack lets the hash bits live anywhere in the mark word now, as long
5367   // as the shift drops the relevant bits into the low 32 bits.  Note that
5368   // the Java spec says that hashCode is an int, so there's no point in capturing
5369   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
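       // Sketch of the extraction above (assuming the standard markWord layout, see markWord.hpp):
       //   jint hash = (jint)((mark >> markWord::hash_shift) & markWord::hash_mask);
       // The guard that follows routes objects with no hash installed yet to the slow path.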

5397     // this->control() comes from set_results_for_java_call
5398     result_reg->init_req(_slow_path, control());
5399     result_val->init_req(_slow_path, slow_result);
5400     result_io  ->set_req(_slow_path, i_o());
5401     result_mem ->set_req(_slow_path, reset_memory());
5402   }
5403 
5404   // Return the combined state.
5405   set_i_o(        _gvn.transform(result_io)  );
5406   set_all_memory( _gvn.transform(result_mem));
5407 
5408   set_result(result_reg, result_val);
5409   return true;
5410 }
5411 
5412 //---------------------------inline_native_getClass----------------------------
5413 // public final native Class<?> java.lang.Object.getClass();
5414 //
5415 // Build special case code for calls to getClass on an object.
5416 bool LibraryCallKit::inline_native_getClass() {
5417   Node* obj = argument(0);
5418   if (obj->is_InlineType()) {
5419     const Type* t = _gvn.type(obj);
5420     if (t->maybe_null()) {
5421       null_check(obj);
5422     }
5423     set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
5424     return true;
5425   }
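       // Illustrative: in this branch the exact inline klass is statically known, so the result
       // folds to a constant mirror (e.g. a hypothetical MyValue.class); the general case below
       // loads the mirror from the receiver's klass at runtime.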
5426   obj = null_check_receiver();
5427   if (stopped())  return true;
5428   set_result(load_mirror_from_klass(load_object_klass(obj)));
5429   return true;
5430 }
5431 
5432 //-----------------inline_native_Reflection_getCallerClass---------------------
5433 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
5434 //
5435 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
5436 //
5437 // NOTE: This code must perform the same logic as JVM_GetCallerClass
5438 // in that it must skip particular security frames and checks for
5439 // caller sensitive methods.
5440 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
5441 #ifndef PRODUCT
5442   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
5443     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
5444   }
5445 #endif
5446 

5828 //  not cloneable or finalizer => slow path to out-of-line Object.clone
5829 //
5830 // The general case has two steps, allocation and copying.
5831 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5832 //
5833 // Copying also has two cases, oop arrays and everything else.
5834 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5835 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5836 //
5837 // These steps fold up nicely if and when the cloned object's klass
5838 // can be sharply typed as an object array, a type array, or an instance.
5839 //
5840 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5841   PhiNode* result_val;
5842 
5843   // Set the reexecute bit for the interpreter to reexecute
5844   // the bytecode that invokes Object.clone if deoptimization happens.
5845   { PreserveReexecuteState preexecs(this);
5846     jvms()->set_should_reexecute(true);
5847 
5848     Node* obj = argument(0);
5849     obj = null_check_receiver();
5850     if (stopped())  return true;
5851 
5852     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5853     if (obj_type->is_inlinetypeptr()) {
5854       // If the object to clone is an inline type, we can simply return it (i.e. a nop) since inline types have
5855       // no identity.
5856       set_result(obj);
5857       return true;
5858     }
5859 
5860     // If we are going to clone an instance, we need its exact type to
5861     // know the number and types of fields to convert the clone to
5862     // loads/stores. Maybe a speculative type can help us.
5863     if (!obj_type->klass_is_exact() &&
5864         obj_type->speculative_type() != nullptr &&
5865         obj_type->speculative_type()->is_instance_klass() &&
5866         !obj_type->speculative_type()->is_inlinetype()) {
5867       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5868       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5869           !spec_ik->has_injected_fields()) {
5870         if (!obj_type->isa_instptr() ||
5871             obj_type->is_instptr()->instance_klass()->has_subklass()) {
5872           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5873         }
5874       }
5875     }
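         // Illustrative example (class name is hypothetical): if profiling shows obj is always a
         // Point with a few non-static fields and no injected fields, the cast above sharpens its
         // type so the clone can later be expanded into plain field loads/stores instead of a
         // generic runtime copy.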
5876 
5877     // Conservatively insert a memory barrier on all memory slices.
5878     // Do not let writes into the original float below the clone.
5879     insert_mem_bar(Op_MemBarCPUOrder);
5880 
5881     // paths into result_reg:
5882     enum {
5883       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
5884       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
5885       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
5886       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
5887       PATH_LIMIT
5888     };
5889     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5890     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5891     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
5892     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5893     record_for_igvn(result_reg);
5894 
5895     Node* obj_klass = load_object_klass(obj);
5896     // We only go to the fast case code if we pass a number of guards.
5897     // The paths which do not pass are accumulated in the slow_region.
5898     RegionNode* slow_region = new RegionNode(1);
5899     record_for_igvn(slow_region);
5900 
5901     Node* array_obj = obj;
5902     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr, &array_obj);
5903     if (array_ctl != nullptr) {
5904       // It's an array.
5905       PreserveJVMState pjvms(this);
5906       set_control(array_ctl);



5907 
5908       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5909       const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
5910       if (UseArrayFlattening && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
5911           obj_type->can_be_inline_array() &&
5912           (ary_ptr == nullptr || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
5913         // A flat inline type array may have object fields that would require a
5914         // write barrier. Conservatively, go to the slow path.
5915         generate_fair_guard(flat_array_test(obj_klass), slow_region);













5916       }







5917 
5918       if (!stopped()) {
5919         Node* obj_length = load_array_length(array_obj);
5920         Node* array_size = nullptr; // Size of the array without object alignment padding.
5921         Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5922 
5923         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5924         if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5925           // If it is an oop array, it requires very special treatment,
5926           // because GC barriers are required when accessing the array.
5927           Node* is_obja = generate_refArray_guard(obj_klass, (RegionNode*)nullptr);
5928           if (is_obja != nullptr) {
5929             PreserveJVMState pjvms2(this);
5930             set_control(is_obja);
5931             // Generate a direct call to the right arraycopy function(s).
5932             // Clones are always tightly coupled.
5933             ArrayCopyNode* ac = ArrayCopyNode::make(this, true, array_obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5934             ac->set_clone_oop_array();
5935             Node* n = _gvn.transform(ac);
5936             assert(n == ac, "cannot disappear");
5937             ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5938 
5939             result_reg->init_req(_objArray_path, control());
5940             result_val->init_req(_objArray_path, alloc_obj);
5941             result_i_o ->set_req(_objArray_path, i_o());
5942             result_mem ->set_req(_objArray_path, reset_memory());
5943           }
5944         }
5945         // Otherwise, there are no barriers to worry about.
5946         // (We can dispense with card marks if we know the allocation
5947         //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
5948         //  causes the non-eden paths to take compensating steps to
5949         //  simulate a fresh allocation, so that no further
5950         //  card marks are required in compiled code to initialize
5951         //  the object.)
5952 
5953         if (!stopped()) {
5954           copy_to_clone(obj, alloc_obj, array_size, true);
5955 
5956           // Present the results of the copy.
5957           result_reg->init_req(_array_path, control());
5958           result_val->init_req(_array_path, alloc_obj);
5959           result_i_o ->set_req(_array_path, i_o());
5960           result_mem ->set_req(_array_path, reset_memory());
5961         }
5962       }
5963     }
5964 




5965     if (!stopped()) {
5966       // It's an instance (the array case was handled above).  Make the slow-path tests.
5967       // If this is a virtual call, we generate a funny guard.  We grab
5968       // the vtable entry corresponding to clone() from the target object.
5969       // If the target method which we are calling happens to be the
5970       // Object clone() method, we pass the guard.  We do not need this
5971       // guard for non-virtual calls; the caller is known to be the native
5972       // Object clone().
5973       if (is_virtual) {
5974         generate_virtual_guard(obj_klass, slow_region);
5975       }
5976 
5977       // The object must be easily cloneable and must not have a finalizer.
5978       // Both of these conditions may be checked in a single test.
5979       // We could optimize the test further, but we don't care.
5980       generate_misc_flags_guard(obj_klass,
5981                                 // Test both conditions:
5982                                 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
5983                                 // Must be cloneable but not finalizer:
5984                                 KlassFlags::_misc_is_cloneable_fast,

6076         set_jvms(sfpt->jvms());
6077         _reexecute_sp = jvms()->sp();
6078 
6079         return saved_jvms;
6080       }
6081     }
6082   }
6083   return nullptr;
6084 }
6085 
6086 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
6087 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
6088 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
6089   JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
6090   uint size = alloc->req();
6091   SafePointNode* sfpt = new SafePointNode(size, old_jvms);
6092   old_jvms->set_map(sfpt);
6093   for (uint i = 0; i < size; i++) {
6094     sfpt->init_req(i, alloc->in(i));
6095   }
6096   int adjustment = 1;
6097   const TypeAryKlassPtr* ary_klass_ptr = alloc->in(AllocateNode::KlassNode)->bottom_type()->is_aryklassptr();
6098   if (ary_klass_ptr->is_null_free()) {
6099     // A null-free, tightly coupled array allocation can only come from LibraryCallKit::inline_newArray which
6100     // also requires the componentType and initVal on stack for re-execution.
6101     // Re-create and push the componentType.
6102     ciArrayKlass* klass = ary_klass_ptr->exact_klass()->as_array_klass();
6103     ciInstance* instance = klass->component_mirror_instance();
6104     const TypeInstPtr* t_instance = TypeInstPtr::make(instance);
6105     sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), makecon(t_instance));
6106     adjustment++;
6107   }
6108   // re-push array length for deoptimization
6109   sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment - 1, alloc->in(AllocateNode::ALength));
6110   if (ary_klass_ptr->is_null_free()) {
6111     // Re-create and push the initVal.
6112     Node* init_val = alloc->in(AllocateNode::InitValue);
6113     if (init_val == nullptr) {
6114       init_val = InlineTypeNode::make_all_zero(_gvn, ary_klass_ptr->elem()->is_instklassptr()->instance_klass()->as_inline_klass());
6115     } else if (UseCompressedOops) {
6116       init_val = _gvn.transform(new DecodeNNode(init_val, init_val->bottom_type()->make_ptr()));
6117     }
6118     sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment, init_val);
6119     adjustment++;
6120   }
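       // Resulting layout (sketch): the re-executed state sees [..., length] on the expression
       // stack, or [..., componentType, length, initVal] for a null-free array allocation,
       // matching the operand order that re-execution via inline_newArray expects (per the
       // comment above).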
6121   old_jvms->set_sp(old_jvms->sp() + adjustment);
6122   old_jvms->set_monoff(old_jvms->monoff() + adjustment);
6123   old_jvms->set_scloff(old_jvms->scloff() + adjustment);
6124   old_jvms->set_endoff(old_jvms->endoff() + adjustment);
6125   old_jvms->set_should_reexecute(true);
6126 
6127   sfpt->set_i_o(map()->i_o());
6128   sfpt->set_memory(map()->memory());
6129   sfpt->set_control(map()->control());
6130   return sfpt;
6131 }
6132 
6133 // In case of a deoptimization, we restart execution at the
6134 // allocation, allocating a new array. We would leave an uninitialized
6135 // array in the heap that GCs wouldn't expect. Move the allocation
6136 // after the traps so we don't allocate the array if we
6137 // deoptimize. This is possible because tightly_coupled_allocation()
6138 // guarantees there's no observer of the allocated array at this point
6139 // and the control flow is simple enough.
6140 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
6141                                                     int saved_reexecute_sp, uint new_idx) {
6142   if (saved_jvms_before_guards != nullptr && !stopped()) {
6143     replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
6144 
6145     assert(alloc != nullptr, "only with a tightly coupled allocation");
6146     // restore JVM state to the state at the arraycopy
6147     saved_jvms_before_guards->map()->set_control(map()->control());
6148     assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
6149     assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
6150     // If we've improved the types of some nodes (null check) while
6151     // emitting the guards, propagate them to the current state
6152     map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
6153     set_jvms(saved_jvms_before_guards);
6154     _reexecute_sp = saved_reexecute_sp;
6155 
6156     // Remove the allocation from above the guards
6157     CallProjections* callprojs = alloc->extract_projections(true);

6158     InitializeNode* init = alloc->initialization();
6159     Node* alloc_mem = alloc->in(TypeFunc::Memory);
6160     C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
6161     init->replace_mem_projs_by(alloc_mem, C);
6162 
6163     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
6164     // the allocation (i.e. is only valid if the allocation succeeds):
6165     // 1) replace CastIINode with AllocateArrayNode's length here
6166     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
6167     //
6168     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
6169     // a new, separate CastIINode (arraycopy guard checks or any array length use between the array allocation and the arraycopy)
6170     Node* init_control = init->proj_out(TypeFunc::Control);
6171     Node* alloc_length = alloc->Ideal_length();
6172 #ifdef ASSERT
6173     Node* prev_cast = nullptr;
6174 #endif
6175     for (uint i = 0; i < init_control->outcnt(); i++) {
6176       Node* init_out = init_control->raw_out(i);
6177       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
6178 #ifdef ASSERT
6179         if (prev_cast == nullptr) {
6180           prev_cast = init_out;

6182           if (prev_cast->cmp(*init_out) == false) {
6183             prev_cast->dump();
6184             init_out->dump();
6185             assert(false, "not equal CastIINode");
6186           }
6187         }
6188 #endif
6189         C->gvn_replace_by(init_out, alloc_length);
6190       }
6191     }
6192     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
6193 
6194     // move the allocation here (after the guards)
6195     _gvn.hash_delete(alloc);
6196     alloc->set_req(TypeFunc::Control, control());
6197     alloc->set_req(TypeFunc::I_O, i_o());
6198     Node *mem = reset_memory();
6199     set_all_memory(mem);
6200     alloc->set_req(TypeFunc::Memory, mem);
6201     set_control(init->proj_out_or_null(TypeFunc::Control));
6202     set_i_o(callprojs->fallthrough_ioproj);
6203 
6204     // Update memory as done in GraphKit::set_output_for_allocation()
6205     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
6206     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
6207     if (ary_type->isa_aryptr() && length_type != nullptr) {
6208       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
6209     }
6210     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
6211     int            elemidx  = C->get_alias_index(telemref);
6212     // Need to properly move every memory projection for the Initialize
6213 #ifdef ASSERT
6214     int mark_idx = C->get_alias_index(ary_type->add_offset(oopDesc::mark_offset_in_bytes()));
6215     int klass_idx = C->get_alias_index(ary_type->add_offset(oopDesc::klass_offset_in_bytes()));
6216 #endif
6217     auto move_proj = [&](ProjNode* proj) {
6218       int alias_idx = C->get_alias_index(proj->adr_type());
6219       assert(alias_idx == Compile::AliasIdxRaw ||
6220              alias_idx == elemidx ||
6221              alias_idx == mark_idx ||
6222              alias_idx == klass_idx, "should be raw memory or array element type");

6532         top_src  = src_type->isa_aryptr();
6533         has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
6534         src_spec = true;
6535       }
6536       if (!has_dest) {
6537         dest = maybe_cast_profiled_obj(dest, dest_k, true);
6538         dest_type  = _gvn.type(dest);
6539         top_dest  = dest_type->isa_aryptr();
6540         has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
6541         dest_spec = true;
6542       }
6543     }
6544   }
6545 
6546   if (has_src && has_dest && can_emit_guards) {
6547     BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
6548     BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
6549     if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
6550     if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
6551 
6552     if (src_elem == dest_elem && top_src->is_flat() == top_dest->is_flat() && src_elem == T_OBJECT) {
6553       // If both arrays are object arrays then having the exact types
6554       // for both will remove the need for a subtype check at runtime
6555       // before the call and may make it possible to pick a faster copy
6556       // routine (without a subtype check on every element)
6557       // Do we have the exact type of src?
6558       bool could_have_src = src_spec;
6559       // Do we have the exact type of dest?
6560       bool could_have_dest = dest_spec;
6561       ciKlass* src_k = nullptr;
6562       ciKlass* dest_k = nullptr;
6563       if (!src_spec) {
6564         src_k = src_type->speculative_type_not_null();
6565         if (src_k != nullptr && src_k->is_array_klass()) {
6566           could_have_src = true;
6567         }
6568       }
6569       if (!dest_spec) {
6570         dest_k = dest_type->speculative_type_not_null();
6571         if (dest_k != nullptr && dest_k->is_array_klass()) {
6572           could_have_dest = true;
6573         }
6574       }
6575       if (could_have_src && could_have_dest) {
6576         // If we can have both exact types, emit the missing guards
6577         if (could_have_src && !src_spec) {
6578           src = maybe_cast_profiled_obj(src, src_k, true);
6579           src_type = _gvn.type(src);
6580           top_src = src_type->isa_aryptr();
6581         }
6582         if (could_have_dest && !dest_spec) {
6583           dest = maybe_cast_profiled_obj(dest, dest_k, true);
6584           dest_type = _gvn.type(dest);
6585           top_dest = dest_type->isa_aryptr();
6586         }
6587       }
6588     }
6589   }
6590 
6591   ciMethod* trap_method = method();
6592   int trap_bci = bci();
6593   if (saved_jvms_before_guards != nullptr) {
6594     trap_method = alloc->jvms()->method();
6595     trap_bci = alloc->jvms()->bci();
6596   }
6597 
6598   bool negative_length_guard_generated = false;
6599 
6600   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
6601       can_emit_guards && !src->is_top() && !dest->is_top()) {

6602     // validate arguments: enables transformation of the ArrayCopyNode
6603     validated = true;
6604 
6605     RegionNode* slow_region = new RegionNode(1);
6606     record_for_igvn(slow_region);
6607 
6608     // (1) src and dest are arrays.
6609     generate_non_array_guard(load_object_klass(src), slow_region, &src);
6610     generate_non_array_guard(load_object_klass(dest), slow_region, &dest);
6611 
6612     // (2) src and dest arrays must have elements of the same BasicType
6613     // done at macro expansion or at Ideal transformation time
6614 
6615     // (4) src_offset must not be negative.
6616     generate_negative_guard(src_offset, slow_region);
6617 
6618     // (5) dest_offset must not be negative.
6619     generate_negative_guard(dest_offset, slow_region);
6620 
6621     // (7) src_offset + length must not exceed length of src.
6622     generate_limit_guard(src_offset, length,
6623                          load_array_length(src),
6624                          slow_region);
6625 
6626     // (8) dest_offset + length must not exceed length of dest.
6627     generate_limit_guard(dest_offset, length,
6628                          load_array_length(dest),
6629                          slow_region);
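         // Worked example for guards (7) and (8) (hypothetical values): copying length = 5
         // elements starting at src_offset = 7 from a 10-element src would touch indices 7..11;
         // since 7 + 5 > 10, the limit guard routes this call to slow_region.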
6630 
6631     // (6) length must not be negative.
6632     // This is also checked in generate_arraycopy() during macro expansion, but
6633     // we also have to check it here for the case where the ArrayCopyNode will
6634     // be eliminated by Escape Analysis.
6635     if (EliminateAllocations) {
6636       generate_negative_guard(length, slow_region);
6637       negative_length_guard_generated = true;
6638     }
6639 
6640     // (9) each element of an oop array must be assignable
6641     Node* dest_klass = load_object_klass(dest);
6642     Node* refined_dest_klass = dest_klass;
6643     if (src != dest) {
6644       dest_klass = load_non_refined_array_klass(refined_dest_klass);
6645       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
6646       slow_region->add_req(not_subtype_ctrl);







6647     }
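         // Illustrative example for guard (9): copying a String[] into an Object[] passes the
         // subtype check (String[] is a subtype of Object[]), whereas an arraycopy whose source
         // elements are not assignable to the destination's element type (e.g. an actual Object[]
         // into a String[]) takes not_subtype_ctrl into slow_region.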
6648 
6649     // TODO 8251971 Improve this. What about atomicity? Make sure this is always folded for type arrays.
6650     // If destination is null-restricted, source must be null-restricted as well: src_null_restricted || !dst_null_restricted
6651     Node* src_klass = load_object_klass(src);
6652     Node* adr_prop_src = basic_plus_adr(top(), src_klass, in_bytes(ArrayKlass::properties_offset()));
6653     Node* prop_src = _gvn.transform(LoadNode::make(_gvn, control(), immutable_memory(), adr_prop_src,
6654                                                    _gvn.type(adr_prop_src)->is_ptr(), TypeInt::INT, T_INT,
6655                                                    MemNode::unordered));
6656     Node* adr_prop_dest = basic_plus_adr(top(), refined_dest_klass, in_bytes(ArrayKlass::properties_offset()));
6657     Node* prop_dest = _gvn.transform(LoadNode::make(_gvn, control(), immutable_memory(), adr_prop_dest,
6658                                                     _gvn.type(adr_prop_dest)->is_ptr(), TypeInt::INT, T_INT,
6659                                                     MemNode::unordered));
6660 
6661     const ArrayProperties props_null_restricted = ArrayProperties::Default().with_null_restricted();
6662     jint props_value = (jint)props_null_restricted.value();
6663 
6664     prop_dest = _gvn.transform(new XorINode(prop_dest, intcon(props_value)));
6665     prop_src = _gvn.transform(new OrINode(prop_dest, prop_src));
6666     prop_src = _gvn.transform(new AndINode(prop_src, intcon(props_value)));
6667 
6668     Node* chk = _gvn.transform(new CmpINode(prop_src, intcon(props_value)));
6669     Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::ne));
6670     generate_fair_guard(tst, slow_region);
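         // Decoding the bit trick above (sketch): for each property bit selected by props_value,
         //   ((prop_dest ^ props_value) | prop_src) & props_value == props_value
         // holds iff the destination having the bit implies the source has it too; in particular,
         // a null-restricted destination with a non-null-restricted source fails the test and
         // takes the slow path.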
6671 
6672     // TODO 8251971 This is too strong
6673     generate_fair_guard(flat_array_test(src), slow_region);
6674     generate_fair_guard(flat_array_test(dest), slow_region);
6675 
6676     {
6677       PreserveJVMState pjvms(this);
6678       set_control(_gvn.transform(slow_region));
6679       uncommon_trap(Deoptimization::Reason_intrinsic,
6680                     Deoptimization::Action_make_not_entrant);
6681       assert(stopped(), "Should be stopped");
6682     }
6683 
6684     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->isa_klassptr();
6685     if (dest_klass_t == nullptr) {
6686       // refined_dest_klass may not be an array, which leads to dest_klass being top. This means we
6687       // are in a dead path.
6688       uncommon_trap(Deoptimization::Reason_intrinsic,
6689                     Deoptimization::Action_make_not_entrant);
6690       return true;
6691     }
6692 
6693     const Type* toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
6694     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
6695     arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
6696   }
6697 
6698   if (stopped()) {
6699     return true;
6700   }
6701 
6702   Node* dest_klass = load_object_klass(dest);
6703   dest_klass = load_non_refined_array_klass(dest_klass);
6704 
6705   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6706                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
6707                                           // so the compiler has a chance to eliminate them: during macro expansion,
6708                                           // we have to set their control (CastPP nodes are eliminated).
6709                                           load_object_klass(src), dest_klass,
6710                                           load_array_length(src), load_array_length(dest));
6711 
6712   ac->set_arraycopy(validated);
6713 
6714   Node* n = _gvn.transform(ac);
6715   if (n == ac) {
6716     ac->connect_outputs(this);
6717   } else {
6718     assert(validated, "shouldn't transform if all arguments not validated");
6719     set_all_memory(n);
6720   }
6721   clear_upper_avx();
6722 
6723 
6724   return true;
6725 }
6726 
6727 
6728 // Helper function which determines if an arraycopy immediately follows
6729 // an allocation, with no intervening tests or other escapes for the object.