src/hotspot/share/opto/library_call.cpp

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"

  27 #include "ci/ciUtilities.inline.hpp"
  28 #include "classfile/vmIntrinsics.hpp"
  29 #include "compiler/compileBroker.hpp"
  30 #include "compiler/compileLog.hpp"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "jfr/support/jfrIntrinsics.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/klass.inline.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/c2compiler.hpp"
  39 #include "opto/castnode.hpp"
  40 #include "opto/cfgnode.hpp"
  41 #include "opto/convertnode.hpp"
  42 #include "opto/countbitsnode.hpp"
  43 #include "opto/idealKit.hpp"
  44 #include "opto/library_call.hpp"
  45 #include "opto/mathexactnode.hpp"
  46 #include "opto/mulnode.hpp"

 308   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 309   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 310   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 311   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 312   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 313 
 314   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 315 
 316   case vmIntrinsics::_vectorizedHashCode:       return inline_vectorizedHashCode();
 317 
 318   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 319   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 320   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 321   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 322 
 323   case vmIntrinsics::_compressStringC:
 324   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 325   case vmIntrinsics::_inflateStringC:
 326   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 327 


 328   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 329   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 330   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 331   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 332   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 333   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 334   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 335   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 336   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);

 337 
 338   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 339   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 340   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 341   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 342   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 343   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 344   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 345   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 346   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);

 347 
 348   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 349   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 350   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 351   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 352   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 353   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 354   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 355   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 356   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 357 
 358   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 359   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 360   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 361   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 362   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 363   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 364   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 365   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 366   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 493                                                                                          "notifyJvmtiEnd", false, true);
 494   case vmIntrinsics::_notifyJvmtiVThreadMount:   return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
 495                                                                                          "notifyJvmtiMount", false, false);
 496   case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
 497                                                                                          "notifyJvmtiUnmount", false, false);
 498   case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
 499 #endif
 500 
 501 #ifdef JFR_HAVE_INTRINSICS
 502   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
 503   case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
 504   case vmIntrinsics::_jvm_commit:               return inline_native_jvm_commit();
 505 #endif
 506   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 507   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 508   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 509   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 510   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 511   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 512   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();

 513   case vmIntrinsics::_setMemory:                return inline_unsafe_setMemory();
 514   case vmIntrinsics::_getLength:                return inline_native_getLength();
 515   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 516   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 517   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 518   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 519   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 520   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 521   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 522 
 523   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 524   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);

 525 
 526   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 527 
 528   case vmIntrinsics::_isInstance:
 529   case vmIntrinsics::_getModifiers:
 530   case vmIntrinsics::_isInterface:
 531   case vmIntrinsics::_isArray:
 532   case vmIntrinsics::_isPrimitive:
 533   case vmIntrinsics::_isHidden:
 534   case vmIntrinsics::_getSuperclass:
 535   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 536 
 537   case vmIntrinsics::_floatToRawIntBits:
 538   case vmIntrinsics::_floatToIntBits:
 539   case vmIntrinsics::_intBitsToFloat:
 540   case vmIntrinsics::_doubleToRawLongBits:
 541   case vmIntrinsics::_doubleToLongBits:
 542   case vmIntrinsics::_longBitsToDouble:
 543   case vmIntrinsics::_floatToFloat16:
 544   case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());

2236     case vmIntrinsics::_remainderUnsigned_l: {
2237       zero_check_long(argument(2));
2238       // Compile-time detection of division by zero (a constant zero divisor)
2239       if (stopped()) {
2240         return true; // keep the graph constructed so far
2241       }
2242       n = new UModLNode(control(), argument(0), argument(2));
2243       break;
2244     }
2245     default:  fatal_unexpected_iid(id);  break;
2246   }
2247   set_result(_gvn.transform(n));
2248   return true;
2249 }
2250 
2251 //----------------------------inline_unsafe_access----------------------------
2252 
2253 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2254   // Attempt to infer a sharper value type from the offset and base type.
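       // For example (illustrative): an Unsafe.getReference whose address points into
       // a String[] element sees an array address type with element type String, so the
       // value type can be sharpened from Object to String by the code below.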
2255   ciKlass* sharpened_klass = nullptr;

2256 
2257   // See if it is an instance field, with an object type.
2258   if (alias_type->field() != nullptr) {
2259     if (alias_type->field()->type()->is_klass()) {
2260       sharpened_klass = alias_type->field()->type()->as_klass();

2261     }
2262   }
2263 
2264   const TypeOopPtr* result = nullptr;
2265   // See if it is a narrow oop array.
2266   if (adr_type->isa_aryptr()) {
2267     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2268       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();

2269       if (elem_type != nullptr && elem_type->is_loaded()) {
2270         // Sharpen the value type.
2271         result = elem_type;
2272       }
2273     }
2274   }
2275 
2276   // The sharpened class might be unloaded if there is no class loader
2277   // constraint in place.
2278   if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2279     // Sharpen the value type.
2280     result = TypeOopPtr::make_from_klass(sharpened_klass);



2281   }
2282   if (result != nullptr) {
2283 #ifndef PRODUCT
2284     if (C->print_intrinsics() || C->print_inlining()) {
2285       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2286       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2287     }
2288 #endif
2289   }
2290   return result;
2291 }
2292 
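     // Maps an Unsafe access kind to a C2 memory-ordering decorator. For example
     // (illustrative): plain getInt/putInt use Relaxed and map to MO_UNORDERED, while
     // getIntVolatile/putIntVolatile use Volatile and map to MO_SEQ_CST.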
2293 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2294   switch (kind) {
2295       case Relaxed:
2296         return MO_UNORDERED;
2297       case Opaque:
2298         return MO_RELAXED;
2299       case Acquire:
2300         return MO_ACQUIRE;
2301       case Release:
2302         return MO_RELEASE;
2303       case Volatile:
2304         return MO_SEQ_CST;
2305       default:
2306         ShouldNotReachHere();
2307         return 0;
2308   }
2309 }
2310 
2311 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2312   if (callee()->is_static())  return false;  // caller must have the capability!
2313   DecoratorSet decorators = C2_UNSAFE_ACCESS;
2314   guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2315   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2316   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2317 
2318   if (is_reference_type(type)) {
2319     decorators |= ON_UNKNOWN_OOP_REF;
2320   }
2321 
2322   if (unaligned) {
2323     decorators |= C2_UNALIGNED;
2324   }
2325 
2326 #ifndef PRODUCT
2327   {
2328     ResourceMark rm;
2329     // Check the signatures.
2330     ciSignature* sig = callee()->signature();
2331 #ifdef ASSERT
2332     if (!is_store) {
2333       // Object getReference(Object base, int/long offset), etc.
2334       BasicType rtype = sig->return_type()->basic_type();
2335       assert(rtype == type, "getter must return the expected value");
2336       assert(sig->count() == 2, "oop getter has 2 arguments");
2337       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2338       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2339     } else {
2340       // void putReference(Object base, int/long offset, Object x), etc.
2341       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2342       assert(sig->count() == 3, "oop putter has 3 arguments");
2343       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2344       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2345       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2346       assert(vtype == type, "putter must accept the expected value");
2347     }
2348 #endif // ASSERT
2349  }
2350 #endif //PRODUCT
2351 
2352   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2353 
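       // Illustrative argument layout for, e.g., int Unsafe.getInt(Object o, long offset):
       //   argument(0) = the Unsafe receiver, argument(1) = the base object,
       //   argument(2) = the long offset (it occupies two slots), and for stores the
       //   stored value starts at argument(4).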
2354   Node* receiver = argument(0);  // type: oop
2355 
2356   // Build address expression.
2357   Node* heap_base_oop = top();
2358 
2359   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2360   Node* base = argument(1);  // type: oop
2361   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2362   Node* offset = argument(2);  // type: long
2363   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2364   // to be plain byte offsets, which are also the same as those accepted
2365   // by oopDesc::field_addr.
2366   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2367          "fieldOffset must be byte-scaled");
2368   // 32-bit machines ignore the high half!
2369   offset = ConvL2X(offset);
2370 
2371   // Save state and restore on bailout
2372   uint old_sp = sp();
2373   SafePointNode* old_map = clone_map();
2374 
2375   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2376   assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2377 
2378   if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2379     if (type != T_OBJECT) {
2380       decorators |= IN_NATIVE; // off-heap primitive access
2381     } else {
2382       set_map(old_map);
2383       set_sp(old_sp);
2384       return false; // off-heap oop accesses are not supported
2385     }
2386   } else {
2387     heap_base_oop = base; // on-heap or mixed access
2388   }
2389 
2390   // Can base be null? Otherwise, always on-heap access.
2391   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2392 
2393   if (!can_access_non_heap) {
2394     decorators |= IN_HEAP;
2395   }
2396 
2397   Node* val = is_store ? argument(4) : nullptr;
2398 
2399   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2400   if (adr_type == TypePtr::NULL_PTR) {
2401     set_map(old_map);
2402     set_sp(old_sp);
2403     return false; // off-heap access with zero address
2404   }
2405 
2406   // Try to categorize the address.
2407   Compile::AliasType* alias_type = C->alias_type(adr_type);
2408   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2409 
2410   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2411       alias_type->adr_type() == TypeAryPtr::RANGE) {
2412     set_map(old_map);
2413     set_sp(old_sp);
2414     return false; // not supported
2415   }
2416 
2417   bool mismatched = false;
2418   BasicType bt = alias_type->basic_type();
2419   if (bt != T_ILLEGAL) {
2420     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2421     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2422       // Alias type doesn't differentiate between byte[] and boolean[].
2423       // Use address type to get the element type.
2424       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2425     }
2426     if (is_reference_type(bt, true)) {
2427       // accessing an array field with getReference is not a mismatch
2428       bt = T_OBJECT;
2429     }
2430     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2431       // Don't intrinsify mismatched object accesses
2432       set_map(old_map);
2433       set_sp(old_sp);
2434       return false;
2435     }
2436     mismatched = (bt != type);
2437   } else if (alias_type->adr_type()->isa_oopptr()) {
2438     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2439   }
2440 
2441   destruct_map_clone(old_map);
2442   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2443 
2444   if (mismatched) {
2445     decorators |= C2_MISMATCHED;
2446   }
2447 
2448   // First guess at the value type.
2449   const Type *value_type = Type::get_const_basic_type(type);
2450 
2451   // Figure out the memory ordering.
2452   decorators |= mo_decorator_for_access_kind(kind);
2453 
2454   if (!is_store && type == T_OBJECT) {
2455     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2456     if (tjp != nullptr) {
2457       value_type = tjp;


2458     }
2459   }
2460 
2461   receiver = null_check(receiver);
2462   if (stopped()) {
2463     return true;
2464   }
2465   // Heap pointers get a null-check from the interpreter,
2466   // as a courtesy.  However, this is not guaranteed by Unsafe,
2467   // and it is not possible to fully distinguish unintended nulls
2468   // from intended ones in this API.
2469 
2470   if (!is_store) {
2471     Node* p = nullptr;
2472     // Try to constant fold a load from a constant field
2473     ciField* field = alias_type->field();
2474     if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) {
2475       // final or stable field
2476       p = make_constant_from_field(field, heap_base_oop);
2477     }
2478 
2479     if (p == nullptr) { // Could not constant fold the load
2480       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2481       // Normalize the value returned by getBoolean in the following cases
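           // (Illustrative) A mismatched or raw access may observe any byte value, but a
           // Java boolean must be 0 or 1, so a non-zero result is canonicalized to 1 below.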
2482       if (type == T_BOOLEAN &&
2483           (mismatched ||
2484            heap_base_oop == top() ||                  // - heap_base_oop is null or
2485            (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2486                                                       //   and the unsafe access is made to a large offset
2487                                                       //   (i.e., larger than the maximum offset necessary for any
2488                                                       //   field access)
2489             ) {
2490           IdealKit ideal = IdealKit(this);
2491 #define __ ideal.
2492           IdealVariable normalized_result(ideal);
2493           __ declarations_done();
2494           __ set(normalized_result, p);
2495           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2496           __ set(normalized_result, ideal.ConI(1));
2497           ideal.end_if();
2498           final_sync(ideal);
2499           p = __ value(normalized_result);
2500 #undef __
2501       }
2502     }
2503     if (type == T_ADDRESS) {
2504       p = gvn().transform(new CastP2XNode(nullptr, p));
2505       p = ConvX2UL(p);
2506     }
2507     // The load node has the control of the preceding MemBarCPUOrder.  All
2508     // following nodes will have the control of the MemBarCPUOrder inserted at
2509     // the end of this method.  So, pushing the load onto the stack at a later
2510     // point is fine.
2511     set_result(p);
2512   } else {
2513     if (bt == T_ADDRESS) {
2514       // Repackage the long as a pointer.
2515       val = ConvL2X(val);
2516       val = gvn().transform(new CastX2PNode(val));
2517     }
2518     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2519   }
2520 
2521   return true;
2522 }
2523 
2524 //----------------------------inline_unsafe_load_store----------------------------
2525 // This method serves a couple of different customers (depending on LoadStoreKind):
2526 //
2527 // LS_cmp_swap:
2528 //
2529 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2530 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2531 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2532 //
2533 // LS_cmp_swap_weak:
2534 //
2535 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2536 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2537 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2538 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2539 //
2540 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);
2541 //   boolean weakCompareAndSetIntPlain(     Object o, long offset, int    expected, int    x);
2542 //   boolean weakCompareAndSetIntAcquire(   Object o, long offset, int    expected, int    x);
2543 //   boolean weakCompareAndSetIntRelease(   Object o, long offset, int    expected, int    x);

2709     }
2710     case LS_cmp_swap:
2711     case LS_cmp_swap_weak:
2712     case LS_get_add:
2713       break;
2714     default:
2715       ShouldNotReachHere();
2716   }
2717 
2718   // Null check receiver.
2719   receiver = null_check(receiver);
2720   if (stopped()) {
2721     return true;
2722   }
2723 
2724   int alias_idx = C->get_alias_index(adr_type);
2725 
2726   if (is_reference_type(type)) {
2727     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2728 
2729     // Transformation of a value which could be null pointer (CastPP #null)
2730     // could be delayed during Parse (for example, in adjust_map_after_if()).
2731     // Execute transformation here to avoid barrier generation in such case.
2732     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2733       newval = _gvn.makecon(TypePtr::NULL_PTR);
2734 
2735     if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2736       // Refine the value to a null constant, when it is known to be null
2737       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2738     }
2739   }
2740 
2741   Node* result = nullptr;
2742   switch (kind) {
2743     case LS_cmp_exchange: {
2744       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2745                                             oldval, newval, value_type, type, decorators);
2746       break;
2747     }
2748     case LS_cmp_swap_weak:

2895                     Deoptimization::Action_make_not_entrant);
2896     }
2897     if (stopped()) {
2898       return true;
2899     }
2900 #endif //INCLUDE_JVMTI
2901 
2902   Node* test = nullptr;
2903   if (LibraryCallKit::klass_needs_init_guard(kls)) {
2904     // Note:  The argument might still be an illegal value like
2905     // Serializable.class or Object[].class.   The runtime will handle it.
2906     // But we must make an explicit check for initialization.
2907     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2908     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2909     // can generate code to load it as unsigned byte.
2910     Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
2911     Node* bits = intcon(InstanceKlass::fully_initialized);
2912     test = _gvn.transform(new SubINode(inst, bits));
2913     // The 'test' is non-zero if we need to take a slow path.
2914   }
2915 
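       // (Illustrative) A non-zero 'test' acts as an extra slow-path condition for
       // new_instance: allocation then goes through the runtime slow path, which can
       // cope with a klass that is not yet fully initialized.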
2916   Node* obj = new_instance(kls, test);
2917   set_result(obj);
2918   return true;
2919 }
2920 
2921 //------------------------inline_native_time_funcs--------------
2922 // inline code for System.currentTimeMillis() and System.nanoTime()
2923 // these have the same type and signature
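     // For example (illustrative): System.nanoTime() becomes a leaf runtime call to
     // os::javaTimeNanos with no memory effects, and the long result is taken from the
     // Parms+0 projection of the call.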
2924 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2925   const TypeFunc* tf = OptoRuntime::void_long_Type();
2926   const TypePtr* no_memory_effects = nullptr;
2927   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2928   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2929 #ifdef ASSERT
2930   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2931   assert(value_top == top(), "second value must be top");
2932 #endif
2933   set_result(value);
2934   return true;
2935 }
2936 

3669 
3670 //------------------------inline_native_setCurrentThread------------------
3671 bool LibraryCallKit::inline_native_setCurrentThread() {
3672   assert(C->method()->changes_current_thread(),
3673          "method changes current Thread but is not annotated ChangesCurrentThread");
3674   Node* arr = argument(1);
3675   Node* thread = _gvn.transform(new ThreadLocalNode());
3676   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3677   Node* thread_obj_handle
3678     = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3679   thread_obj_handle = _gvn.transform(thread_obj_handle);
3680   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3681   access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3682   JFR_ONLY(extend_setCurrentThread(thread, arr);)
3683   return true;
3684 }
3685 
3686 const Type* LibraryCallKit::scopedValueCache_type() {
3687   ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3688   const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3689   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3690 
3691   // Because we create the scopedValue cache lazily we have to make the
3692   // type of the result BotPTR.
3693   bool xk = etype->klass_is_exact();
3694   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3695   return objects_type;
3696 }
3697 
3698 Node* LibraryCallKit::scopedValueCache_helper() {
3699   Node* thread = _gvn.transform(new ThreadLocalNode());
3700   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3701   // We cannot use immutable_memory() because we might flip onto a
3702   // different carrier thread, at which point we'll need to use that
3703   // carrier thread's cache.
3704   // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3705   //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3706   return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3707 }
3708 
3709 //------------------------inline_native_scopedValueCache------------------
3710 bool LibraryCallKit::inline_native_scopedValueCache() {
3711   Node* cache_obj_handle = scopedValueCache_helper();
3712   const Type* objects_type = scopedValueCache_type();
3713   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3714 

3797   }
3798 
3799   // Result of top level CFG and Memory.
3800   RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
3801   record_for_igvn(result_rgn);
3802   PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
3803   record_for_igvn(result_mem);
3804 
3805   result_rgn->init_req(_true_path, _gvn.transform(valid_pin_count));
3806   result_rgn->init_req(_false_path, _gvn.transform(continuation_is_null));
3807   result_mem->init_req(_true_path, _gvn.transform(updated_pin_count_memory));
3808   result_mem->init_req(_false_path, _gvn.transform(input_memory_state));
3809 
3810   // Set output state.
3811   set_control(_gvn.transform(result_rgn));
3812   set_all_memory(_gvn.transform(result_mem));
3813 
3814   return true;
3815 }
3816 
3817 //---------------------------load_mirror_from_klass----------------------------
3818 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3819 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3820   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3821   Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3822   // mirror = ((OopHandle)mirror)->resolve();
3823   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3824 }
3825 
3826 //-----------------------load_klass_from_mirror_common-------------------------
3827 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3828 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3829 // and branch to the given path on the region.
3830 // If never_see_null, take an uncommon trap on null, so we can optimistically
3831 // compile for the non-null case.
3832 // If the region is null, force never_see_null = true.
3833 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3834                                                     bool never_see_null,
3835                                                     RegionNode* region,
3836                                                     int null_path,
3837                                                     int offset) {
3838   if (region == nullptr)  never_see_null = true;
3839   Node* p = basic_plus_adr(mirror, offset);
3840   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3841   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3842   Node* null_ctl = top();
3843   kls = null_check_oop(kls, &null_ctl, never_see_null);
3844   if (region != nullptr) {
3845     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

3849   }
3850   return kls;
3851 }
3852 
3853 //--------------------(inline_native_Class_query helpers)---------------------
3854 // Use this for JVM_ACC_INTERFACE.
3855 // Fall through if (mods & mask) == bits, take the guard otherwise.
3856 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
3857                                                  ByteSize offset, const Type* type, BasicType bt) {
3858   // Branch around if the given klass has the given modifier bit set.
3859   // Like generate_guard, adds a new path onto the region.
3860   Node* modp = basic_plus_adr(kls, in_bytes(offset));
3861   Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
3862   Node* mask = intcon(modifier_mask);
3863   Node* bits = intcon(modifier_bits);
3864   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3865   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3866   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3867   return generate_fair_guard(bol, region);
3868 }
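     // Illustrative use of the guard above: generate_interface_guard() masks
     // Klass::_access_flags with JVM_ACC_INTERFACE and expects 0, so the fall-through
     // path sees only non-interfaces and the returned guard control is taken for interfaces.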

3869 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3870   return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
3871                                     Klass::access_flags_offset(), TypeInt::INT, T_INT);
3872 }
3873 
3874 // Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
3875 Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3876   return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
3877                                     Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
3878 }
3879 
3880 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3881   return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
3882 }
3883 
3884 //-------------------------inline_native_Class_query-------------------
3885 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3886   const Type* return_type = TypeInt::BOOL;
3887   Node* prim_return_value = top();  // what happens if it's a primitive class?
3888   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);

4050 
4051   case vmIntrinsics::_getClassAccessFlags:
4052     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
4053     query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
4054     break;
4055 
4056   default:
4057     fatal_unexpected_iid(id);
4058     break;
4059   }
4060 
4061   // Fall-through is the normal case of a query to a real class.
4062   phi->init_req(1, query_value);
4063   region->init_req(1, control());
4064 
4065   C->set_has_split_ifs(true); // Has chance for split-if optimization
4066   set_result(region, phi);
4067   return true;
4068 }
4069 

4070 //-------------------------inline_Class_cast-------------------
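     // Illustrative behavior: Number.class.cast(x) folds to x when the mirror is a
     // compile-time constant and x is statically known to be a Number; if the cast can
     // never succeed the intrinsic is not used and the bytecode throws the
     // ClassCastException; otherwise a dynamic checkcast-style test is generated below.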
4071 bool LibraryCallKit::inline_Class_cast() {
4072   Node* mirror = argument(0); // Class
4073   Node* obj    = argument(1);
4074   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4075   if (mirror_con == nullptr) {
4076     return false;  // dead path (mirror->is_top()).
4077   }
4078   if (obj == nullptr || obj->is_top()) {
4079     return false;  // dead path
4080   }
4081   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4082 
4083   // First, see if Class.cast() can be folded statically.
4084   // java_mirror_type() returns non-null for compile-time Class constants.
4085   ciType* tm = mirror_con->java_mirror_type();

4086   if (tm != nullptr && tm->is_klass() &&
4087       tp != nullptr) {
4088     if (!tp->is_loaded()) {
4089       // Don't use intrinsic when class is not loaded.
4090       return false;
4091     } else {
4092       int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());
4093       if (static_res == Compile::SSC_always_true) {
4094         // isInstance() is true - fold the code.
4095         set_result(obj);
4096         return true;
4097       } else if (static_res == Compile::SSC_always_false) {
4098         // Don't use intrinsic, have to throw ClassCastException.
4099         // If the reference is null, the non-intrinsic bytecode will
4100         // be optimized appropriately.
4101         return false;
4102       }
4103     }
4104   }
4105 
4106   // Bailout intrinsic and do normal inlining if exception path is frequent.
4107   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4108     return false;
4109   }
4110 
4111   // Generate dynamic checks.
4112   // Class.cast() is java implementation of _checkcast bytecode.
4113   // Do checkcast (Parse::do_checkcast()) optimizations here.
4114 
4115   mirror = null_check(mirror);
4116   // If mirror is dead, only null-path is taken.
4117   if (stopped()) {
4118     return true;
4119   }
4120 
4121   // Not-subtype or the mirror's klass ptr is null (in case it is a primitive).
4122   enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
4123   RegionNode* region = new RegionNode(PATH_LIMIT);
4124   record_for_igvn(region);
4125 
4126   // Now load the mirror's klass metaobject, and null-check it.
4127   // If kls is null, we have a primitive mirror and
4128   // nothing is an instance of a primitive type.
4129   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4130 
4131   Node* res = top();


4132   if (!stopped()) {

4133     Node* bad_type_ctrl = top();
4134     // Do checkcast optimizations.
4135     res = gen_checkcast(obj, kls, &bad_type_ctrl);
4136     region->init_req(_bad_type_path, bad_type_ctrl);
4137   }
4138   if (region->in(_prim_path) != top() ||
4139       region->in(_bad_type_path) != top()) {

4140     // Let Interpreter throw ClassCastException.
4141     PreserveJVMState pjvms(this);
4142     set_control(_gvn.transform(region));



4143     uncommon_trap(Deoptimization::Reason_intrinsic,
4144                   Deoptimization::Action_maybe_recompile);
4145   }
4146   if (!stopped()) {
4147     set_result(res);
4148   }
4149   return true;
4150 }
4151 
4152 
4153 //--------------------------inline_native_subtype_check------------------------
4154 // This intrinsic takes the JNI calls out of the heart of
4155 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4156 bool LibraryCallKit::inline_native_subtype_check() {
4157   // Pull both arguments off the stack.
4158   Node* args[2];                // two java.lang.Class mirrors: superc, subc
4159   args[0] = argument(0);
4160   args[1] = argument(1);
4161   Node* klasses[2];             // corresponding Klasses: superk, subk
4162   klasses[0] = klasses[1] = top();
4163 
4164   enum {
4165     // A full decision tree on {superc is prim, subc is prim}:
4166     _prim_0_path = 1,           // {P,N} => false
4167                                 // {P,P} & superc!=subc => false
4168     _prim_same_path,            // {P,P} & superc==subc => true
4169     _prim_1_path,               // {N,P} => false
4170     _ref_subtype_path,          // {N,N} & subtype check wins => true
4171     _both_ref_path,             // {N,N} & subtype check loses => false
4172     PATH_LIMIT
4173   };
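       // Illustrative mapping of the paths above:
       //   Number.class.isAssignableFrom(Integer.class) -> _ref_subtype_path -> true
       //   int.class.isAssignableFrom(int.class)        -> _prim_same_path   -> true
       //   int.class.isAssignableFrom(Integer.class)    -> _prim_0_path      -> false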
4174 
4175   RegionNode* region = new RegionNode(PATH_LIMIT);

4176   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
4177   record_for_igvn(region);

4178 
4179   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
4180   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4181   int class_klass_offset = java_lang_Class::klass_offset();
4182 
4183   // First null-check both mirrors and load each mirror's klass metaobject.
4184   int which_arg;
4185   for (which_arg = 0; which_arg <= 1; which_arg++) {
4186     Node* arg = args[which_arg];
4187     arg = null_check(arg);
4188     if (stopped())  break;
4189     args[which_arg] = arg;
4190 
4191     Node* p = basic_plus_adr(arg, class_klass_offset);
4192     Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
4193     klasses[which_arg] = _gvn.transform(kls);
4194   }
4195 
4196   // Having loaded both klasses, test each for null.
4197   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4198   for (which_arg = 0; which_arg <= 1; which_arg++) {
4199     Node* kls = klasses[which_arg];
4200     Node* null_ctl = top();
4201     kls = null_check_oop(kls, &null_ctl, never_see_null);
4202     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
4203     region->init_req(prim_path, null_ctl);



4204     if (stopped())  break;
4205     klasses[which_arg] = kls;
4206   }
4207 
4208   if (!stopped()) {
4209     // now we have two reference types, in klasses[0..1]
4210     Node* subk   = klasses[1];  // the argument to isAssignableFrom
4211     Node* superk = klasses[0];  // the receiver
4212     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4213     // now we have a successful reference subtype check
4214     region->set_req(_ref_subtype_path, control());
4215   }
4216 
4217   // If both operands are primitive (both klasses null), then
4218   // we must return true when they are identical primitives.
4219   // It is convenient to test this after the first null klass check.
4220   set_control(region->in(_prim_0_path)); // go back to first null check

4221   if (!stopped()) {
4222     // Since superc is primitive, make a guard for the superc==subc case.
4223     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4224     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4225     generate_guard(bol_eq, region, PROB_FAIR);
4226     if (region->req() == PATH_LIMIT+1) {
4227       // A guard was added.  If the added guard is taken, superc==subc.
4228       region->swap_edges(PATH_LIMIT, _prim_same_path);
4229       region->del_req(PATH_LIMIT);
4230     }
4231     region->set_req(_prim_0_path, control()); // Not equal after all.
4232   }
4233 
4234   // these are the only paths that produce 'true':
4235   phi->set_req(_prim_same_path,   intcon(1));
4236   phi->set_req(_ref_subtype_path, intcon(1));
4237 
4238   // pull together the cases:
4239   assert(region->req() == PATH_LIMIT, "sane region");
4240   for (uint i = 1; i < region->req(); i++) {
4241     Node* ctl = region->in(i);
4242     if (ctl == nullptr || ctl == top()) {
4243       region->set_req(i, top());
4244       phi   ->set_req(i, top());
4245     } else if (phi->in(i) == nullptr) {
4246       phi->set_req(i, intcon(0)); // all other paths produce 'false'
4247     }
4248   }
4249 
4250   set_control(_gvn.transform(region));
4251   set_result(_gvn.transform(phi));
4252   return true;
4253 }
4254 
4255 //---------------------generate_array_guard_common------------------------
4256 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
4257                                                   bool obj_array, bool not_array) {
4258 
4259   if (stopped()) {
4260     return nullptr;
4261   }
4262 
4263   // If obj_array/not_array==false/false:
4264   // Branch around if the given klass is in fact an array (either obj or prim).
4265   // If obj_array/not_array==false/true:
4266   // Branch around if the given klass is not an array klass of any kind.
4267   // If obj_array/not_array==true/true:
4268   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
4269   // If obj_array/not_array==true/false:
4270   // Branch around if the kls is an oop array (Object[] or subtype)
4271   //
4272   // Like generate_guard, adds a new path onto the region.
4273   jint  layout_con = 0;
4274   Node* layout_val = get_layout_helper(kls, layout_con);
4275   if (layout_val == nullptr) {
4276     bool query = (obj_array
4277                   ? Klass::layout_helper_is_objArray(layout_con)
4278                   : Klass::layout_helper_is_array(layout_con));
4279     if (query == not_array) {
4280       return nullptr;                       // never a branch
4281     } else {                             // always a branch
4282       Node* always_branch = control();
4283       if (region != nullptr)
4284         region->add_req(always_branch);
4285       set_control(top());
4286       return always_branch;
4287     }
4288   }
4289   // Now test the correct condition.
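       // (Hedged note) Klass::_layout_helper is negative for array klasses, with the
       // array tag in its high bits, so the signed 'lt' compare below separates arrays
       // from instances, and the obj-array tag threshold further separates oop arrays
       // from primitive arrays.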
4290   jint  nval = (obj_array
4291                 ? (jint)(Klass::_lh_array_tag_type_value
4292                    <<    Klass::_lh_array_tag_shift)
4293                 : Klass::_lh_neutral_value);
4294   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4295   BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
4296   // invert the test if we are looking for a non-array
4297   if (not_array)  btest = BoolTest(btest).negate();
4298   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4299   return generate_fair_guard(bol, region);
4300 }
4301 
4302 
4303 //-----------------------inline_native_newArray--------------------------
4304 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4305 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
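     // Note (illustrative): the Unsafe variant is an instance method, so after the
     // receiver the mirror and length are argument(1) and argument(2); the reflective
     // variant is static, so they are argument(0) and argument(1).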
4306 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4307   Node* mirror;
4308   Node* count_val;
4309   if (uninitialized) {
4310     null_check_receiver();
4311     mirror    = argument(1);
4312     count_val = argument(2);
4313   } else {
4314     mirror    = argument(0);
4315     count_val = argument(1);
4316   }
4317 
4318   mirror = null_check(mirror);
4319   // If mirror or obj is dead, only null-path is taken.
4320   if (stopped())  return true;
4321 
4322   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4323   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4324   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);

4430   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4431   { PreserveReexecuteState preexecs(this);
4432     jvms()->set_should_reexecute(true);
4433 
4434     array_type_mirror = null_check(array_type_mirror);
4435     original          = null_check(original);
4436 
4437     // Check if a null path was taken unconditionally.
4438     if (stopped())  return true;
4439 
4440     Node* orig_length = load_array_length(original);
4441 
4442     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4443     klass_node = null_check(klass_node);
4444 
4445     RegionNode* bailout = new RegionNode(1);
4446     record_for_igvn(bailout);
4447 
4448     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4449     // Bail out if that is so.
4450     Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
4451     if (not_objArray != nullptr) {
4452       // Improve the klass node's type from the new optimistic assumption:
4453       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4454       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
4455       Node* cast = new CastPPNode(control(), klass_node, akls);
4456       klass_node = _gvn.transform(cast);
4457     }
4458 
4459     // Bail out if either start or end is negative.
4460     generate_negative_guard(start, bailout, &start);
4461     generate_negative_guard(end,   bailout, &end);
4462 
4463     Node* length = end;
4464     if (_gvn.type(start) != TypeInt::ZERO) {
4465       length = _gvn.transform(new SubINode(end, start));
4466     }
4467 
4468     // Bail out if length is negative (i.e., if start > end).
4469     // Without this the new_array would throw a
4470     // NegativeArraySizeException, but an IllegalArgumentException is what
4471     // should be thrown
4472     generate_negative_guard(length, bailout, &length);
4473 
4474     // Bail out if start is larger than the original length
4475     Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4476     generate_negative_guard(orig_tail, bailout, &orig_tail);
4477 
4478     if (bailout->req() > 1) {
4479       PreserveJVMState pjvms(this);
4480       set_control(_gvn.transform(bailout));
4481       uncommon_trap(Deoptimization::Reason_intrinsic,
4482                     Deoptimization::Action_maybe_recompile);
4483     }
4484 
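         // Illustrative (assuming every guard above passes): Arrays.copyOfRange(a, 2, 5)
         // allocates a new array of length 5 - 2 = 3 and copies MinI(a.length - 2, 3)
         // elements from the original; any remaining slots keep their default values.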
4485     if (!stopped()) {
4486       // How many elements will we copy from the original?
4487       // The answer is MinI(orig_tail, length).
4488       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4489 
4490       // Generate a direct call to the right arraycopy function(s).
4491       // We know the copy is disjoint but we might not know if the
4492       // oop stores need checking.
4493       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).

4499       // to the copyOf to be validated, including that the copy to the
4500       // new array won't trigger an ArrayStoreException. That subtype
4501       // check can be optimized if we know something on the type of
4502       // the input array from type speculation.
4503       if (_gvn.type(klass_node)->singleton()) {
4504         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4505         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4506 
4507         int test = C->static_subtype_check(superk, subk);
4508         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4509           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4510           if (t_original->speculative_type() != nullptr) {
4511             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4512           }
4513         }
4514       }
4515 
4516       bool validated = false;
4517       // Reason_class_check rather than Reason_intrinsic because we
4518       // want to intrinsify even if this traps.
4519       if (!too_many_traps(Deoptimization::Reason_class_check)) {
4520         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4521 
4522         if (not_subtype_ctrl != top()) {
4523           PreserveJVMState pjvms(this);
4524           set_control(not_subtype_ctrl);
4525           uncommon_trap(Deoptimization::Reason_class_check,
4526                         Deoptimization::Action_make_not_entrant);
4527           assert(stopped(), "Should be stopped");
4528         }
4529         validated = true;
4530       }
4531 
4532       if (!stopped()) {
4533         newcopy = new_array(klass_node, length, 0);  // no arguments to push
4534 
4535         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
4536                                                 load_object_klass(original), klass_node);
4537         if (!is_copyOfRange) {
4538           ac->set_copyof(validated);
4539         } else {

4585 
4586 //-----------------------generate_method_call----------------------------
4587 // Use generate_method_call to make a slow-call to the real
4588 // method if the fast path fails.  An alternative would be to
4589 // use a stub like OptoRuntime::slow_arraycopy_Java.
4590 // This only works for expanding the current library call,
4591 // not another intrinsic.  (E.g., don't use this for making an
4592 // arraycopy call inside of the copyOf intrinsic.)
4593 CallJavaNode*
4594 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4595   // When compiling the intrinsic method itself, do not use this technique.
4596   guarantee(callee() != C->method(), "cannot make slow-call to self");
4597 
4598   ciMethod* method = callee();
4599   // ensure the JVMS we have will be correct for this call
4600   guarantee(method_id == method->intrinsic_id(), "must match");
4601 
4602   const TypeFunc* tf = TypeFunc::make(method);
4603   if (res_not_null) {
4604     assert(tf->return_type() == T_OBJECT, "");
4605     const TypeTuple* range = tf->range();
4606     const Type** fields = TypeTuple::fields(range->cnt());
4607     fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4608     const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4609     tf = TypeFunc::make(tf->domain(), new_range);
4610   }
4611   CallJavaNode* slow_call;
4612   if (is_static) {
4613     assert(!is_virtual, "");
4614     slow_call = new CallStaticJavaNode(C, tf,
4615                            SharedRuntime::get_resolve_static_call_stub(), method);
4616   } else if (is_virtual) {
4617     assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4618     int vtable_index = Method::invalid_vtable_index;
4619     if (UseInlineCaches) {
4620       // Suppress the vtable call
4621     } else {
4622       // hashCode and clone are not miranda methods,
4623       // so the vtable index is fixed.
4624       // No need to use the linkResolver to get it.
4625        vtable_index = method->vtable_index();
4626        assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4627               "bad index %d", vtable_index);
4628     }
4629     slow_call = new CallDynamicJavaNode(tf,

4646   set_edges_for_java_call(slow_call);
4647   return slow_call;
4648 }
4649 
4650 
4651 /**
4652  * Build special case code for calls to hashCode on an object. This call may
4653  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4654  * slightly different code.
4655  */
4656 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4657   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4658   assert(!(is_virtual && is_static), "either virtual, special, or static");
4659 
4660   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4661 
4662   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4663   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4664   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4665   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4666   Node* obj = nullptr;







4667   if (!is_static) {
4668     // Check for hashing null object
4669     obj = null_check_receiver();
4670     if (stopped())  return true;        // unconditionally null
4671     result_reg->init_req(_null_path, top());
4672     result_val->init_req(_null_path, top());
4673   } else {
4674     // Do a null check, and return zero if null.
4675     // System.identityHashCode(null) == 0
4676     obj = argument(0);
4677     Node* null_ctl = top();
4678     obj = null_check_oop(obj, &null_ctl);
4679     result_reg->init_req(_null_path, null_ctl);
4680     result_val->init_req(_null_path, _gvn.intcon(0));
4681   }
4682 
4683   // Unconditionally null?  Then return right away.
4684   if (stopped()) {
4685     set_control( result_reg->in(_null_path));
4686     if (!stopped())
4687       set_result(result_val->in(_null_path));
4688     return true;
4689   }
4690 
4691   // We only go to the fast case code if we pass a number of guards.  The
4692   // paths which do not pass are accumulated in the slow_region.
4693   RegionNode* slow_region = new RegionNode(1);
4694   record_for_igvn(slow_region);
4695 
4696   // If this is a virtual call, we generate a funny guard.  We pull out
4697   // the vtable entry corresponding to hashCode() from the target object.
4698   // If the target method which we are calling happens to be the native
4699   // Object hashCode() method, we pass the guard.  We do not need this
4700   // guard for non-virtual calls -- the caller is known to be the native
4701   // Object hashCode().
4702   if (is_virtual) {
4703     // After null check, get the object's klass.
4704     Node* obj_klass = load_object_klass(obj);
4705     generate_virtual_guard(obj_klass, slow_region);
4706   }
4707 
4708   // Get the header out of the object, use LoadMarkNode when available
4709   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4710   // The control of the load must be null. Otherwise, after castPP removal the
4711   // load could float above the null check.
4712   Node* no_ctrl = nullptr;
4713   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4714 
4715   if (!UseObjectMonitorTable) {
4716     // Test the header to see if it is safe to read w.r.t. locking.
4717     Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);

4718     Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4719     if (LockingMode == LM_LIGHTWEIGHT) {
4720       Node *monitor_val   = _gvn.MakeConX(markWord::monitor_value);
4721       Node *chk_monitor   = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4722       Node *test_monitor  = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4723 
4724       generate_slow_guard(test_monitor, slow_region);
4725     } else {
4726       Node *unlocked_val      = _gvn.MakeConX(markWord::unlocked_value);
4727       Node *chk_unlocked      = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
4728       Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
4729 
4730       generate_slow_guard(test_not_unlocked, slow_region);
4731     }
4732   }
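  // (Editor's note, as I read the two branches above: with LM_LIGHTWEIGHT only an
  //  inflated monitor forces the slow path, since a lightweight-locked mark word
  //  still carries the hash bits; the legacy locking mode requires a fully unlocked
  //  header. With UseObjectMonitorTable no check is needed at all, because the mark
  //  word keeps its hash bits even while the object is locked.)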
4733 
4734   // Get the hash value and check to see that it has been properly assigned.
4735   // We depend on hash_mask being at most 32 bits and avoid the use of
4736   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4737   // vm: see markWord.hpp.
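  // (Editor's note: in markWord.hpp the identity hash occupies a bit field read via
  //  hash_shift/hash_mask, and markWord::no_hash (0) means no hash has been installed
  //  yet -- the extraction that follows relies on exactly this layout.)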

4772     // this->control() comes from set_results_for_java_call
4773     result_reg->init_req(_slow_path, control());
4774     result_val->init_req(_slow_path, slow_result);
4775     result_io  ->set_req(_slow_path, i_o());
4776     result_mem ->set_req(_slow_path, reset_memory());
4777   }
4778 
4779   // Return the combined state.
4780   set_i_o(        _gvn.transform(result_io)  );
4781   set_all_memory( _gvn.transform(result_mem));
4782 
4783   set_result(result_reg, result_val);
4784   return true;
4785 }
4786 
4787 //---------------------------inline_native_getClass----------------------------
4788 // public final native Class<?> java.lang.Object.getClass();
4789 //
4790 // Build special case code for calls to getClass on an object.
4791 bool LibraryCallKit::inline_native_getClass() {
4792   Node* obj = null_check_receiver();









4793   if (stopped())  return true;
4794   set_result(load_mirror_from_klass(load_object_klass(obj)));
4795   return true;
4796 }
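// Editor's note: load_mirror_from_klass() reads the klass' java.lang.Class mirror,
// so this intrinsic boils down to a short chain of dependent loads
// (roughly obj -> klass -> mirror) instead of a native call.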
4797 
4798 //-----------------inline_native_Reflection_getCallerClass---------------------
4799 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4800 //
4801 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4802 //
4803 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4804 // in that it must skip particular security frames and checks for
4805 // caller sensitive methods.
4806 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4807 #ifndef PRODUCT
4808   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4809     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4810   }
4811 #endif
4812 

5124     dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5125 
5126     flags |= RC_NARROW_MEM; // narrow in memory
5127   }
5128 
5129   // Call it.  Note that the length argument is not scaled.
5130   make_runtime_call(flags,
5131                     OptoRuntime::make_setmemory_Type(),
5132                     StubRoutines::unsafe_setmemory(),
5133                     "unsafe_setmemory",
5134                     dst_type,
5135                     dst_addr, size XTOP, byte);
5136 
5137   store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
5138 
5139   return true;
5140 }
5141 
5142 #undef XTOP
5143 














5144 //------------------------copy_to_clone-----------------------------------
5145 // Helper function for inline_native_clone.
5146 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5147   assert(obj_size != nullptr, "");
5148   Node* raw_obj = alloc_obj->in(1);
5149   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5150 
5151   AllocateNode* alloc = nullptr;
5152   if (ReduceBulkZeroing &&
5153       // If we are implementing an array clone without knowing its source type
5154       // (can happen when compiling the array-guarded branch of a reflective
5155       // Object.clone() invocation), initialize the array within the allocation.
5156       // This is needed because some GCs (e.g. ZGC) might fall back in this case
5157       // to a runtime clone call that assumes fully initialized source arrays.
5158       (!is_array || obj->get_ptr_type()->isa_aryptr() != nullptr)) {
5159     // We will be completely responsible for initializing this object -
5160     // mark Initialize node as complete.
5161     alloc = AllocateNode::Ideal_allocation(alloc_obj);
5162     // The object was just allocated - there should not be any stores!
5163     guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");

5194 //  not cloneable or finalizer => slow path to out-of-line Object.clone
5195 //
5196 // The general case has two steps, allocation and copying.
5197 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5198 //
5199 // Copying also has two cases, oop arrays and everything else.
5200 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5201 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5202 //
5203 // These steps fold up nicely if and when the cloned object's klass
5204 // can be sharply typed as an object array, a type array, or an instance.
5205 //
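// Editor's sketch of the dispatch below: arrays whose copies need GC barriers take
// _objArray_path (a tightly coupled oop arraycopy), remaining arrays take _array_path
// (raw copy via copy_to_clone), instances that pass the guards take _instance_path,
// and everything else falls back to the out-of-line Object.clone on _slow_path.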
5206 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5207   PhiNode* result_val;
5208 
5209   // Set the reexecute bit for the interpreter to reexecute
5210   // the bytecode that invokes Object.clone if deoptimization happens.
5211   { PreserveReexecuteState preexecs(this);
5212     jvms()->set_should_reexecute(true);
5213 
5214     Node* obj = null_check_receiver();

5215     if (stopped())  return true;
5216 
5217     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();






5218 
5219     // If we are going to clone an instance, we need its exact type to
5220     // know the number and types of fields to convert the clone to
5221     // loads/stores. Maybe a speculative type can help us.
5222     if (!obj_type->klass_is_exact() &&
5223         obj_type->speculative_type() != nullptr &&
5224         obj_type->speculative_type()->is_instance_klass()) {

5225       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5226       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5227           !spec_ik->has_injected_fields()) {
5228         if (!obj_type->isa_instptr() ||
5229             obj_type->is_instptr()->instance_klass()->has_subklass()) {
5230           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5231         }
5232       }
5233     }
5234 
5235     // Conservatively insert a memory barrier on all memory slices.
5236     // Do not let writes into the original float below the clone.
5237     insert_mem_bar(Op_MemBarCPUOrder);
5238 
5239     // paths into result_reg:
5240     enum {
5241       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
5242       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
5243       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
5244       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
5245       PATH_LIMIT
5246     };
5247     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5248     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5249     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
5250     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5251     record_for_igvn(result_reg);
5252 
5253     Node* obj_klass = load_object_klass(obj);





5254     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
5255     if (array_ctl != nullptr) {
5256       // It's an array.
5257       PreserveJVMState pjvms(this);
5258       set_control(array_ctl);
5259       Node* obj_length = load_array_length(obj);
5260       Node* array_size = nullptr; // Size of the array without object alignment padding.
5261       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5262 
5263       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5264       if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5265         // If it is an oop array, it requires very special treatment,
5266         // because gc barriers are required when accessing the array.
5267         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5268         if (is_obja != nullptr) {
5269           PreserveJVMState pjvms2(this);
5270           set_control(is_obja);
5271           // Generate a direct call to the right arraycopy function(s).
5272           // Clones are always tightly coupled.
5273           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5274           ac->set_clone_oop_array();
5275           Node* n = _gvn.transform(ac);
5276           assert(n == ac, "cannot disappear");
5277           ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5278 
5279           result_reg->init_req(_objArray_path, control());
5280           result_val->init_req(_objArray_path, alloc_obj);
5281           result_i_o ->set_req(_objArray_path, i_o());
5282           result_mem ->set_req(_objArray_path, reset_memory());
5283         }
5284       }
5285       // Otherwise, there are no barriers to worry about.
5286       // (We can dispense with card marks if we know the allocation
5287       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
5288       //  causes the non-eden paths to take compensating steps to
5289       //  simulate a fresh allocation, so that no further
5290       //  card marks are required in compiled code to initialize
5291       //  the object.)
5292 
5293       if (!stopped()) {
5294         copy_to_clone(obj, alloc_obj, array_size, true);
5295 
5296         // Present the results of the copy.
5297         result_reg->init_req(_array_path, control());
5298         result_val->init_req(_array_path, alloc_obj);
5299         result_i_o ->set_req(_array_path, i_o());
5300         result_mem ->set_req(_array_path, reset_memory());




































5301       }
5302     }
5303 
5304     // We only go to the instance fast case code if we pass a number of guards.
5305     // The paths which do not pass are accumulated in the slow_region.
5306     RegionNode* slow_region = new RegionNode(1);
5307     record_for_igvn(slow_region);
5308     if (!stopped()) {
5309       // It's an instance (we did array above).  Make the slow-path tests.
5310       // If this is a virtual call, we generate a funny guard.  We grab
5311       // the vtable entry corresponding to clone() from the target object.
5312       // If the target method which we are calling happens to be the
5313       // Object clone() method, we pass the guard.  We do not need this
5314       // guard for non-virtual calls; the caller is known to be the native
5315       // Object clone().
5316       if (is_virtual) {
5317         generate_virtual_guard(obj_klass, slow_region);
5318       }
5319 
5320       // The object must be easily cloneable and must not have a finalizer.
5321       // Both of these conditions may be checked in a single test.
5322       // We could optimize the test further, but we don't care.
5323       generate_misc_flags_guard(obj_klass,
5324                                 // Test both conditions:
5325                                 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
5326                                 // Must be cloneable but not finalizer:
5327                                 KlassFlags::_misc_is_cloneable_fast,

5419         set_jvms(sfpt->jvms());
5420         _reexecute_sp = jvms()->sp();
5421 
5422         return saved_jvms;
5423       }
5424     }
5425   }
5426   return nullptr;
5427 }
5428 
5429 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5430 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5431 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5432   JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5433   uint size = alloc->req();
5434   SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5435   old_jvms->set_map(sfpt);
5436   for (uint i = 0; i < size; i++) {
5437     sfpt->init_req(i, alloc->in(i));
5438   }












5439   // re-push array length for deoptimization
5440   sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
5441   old_jvms->set_sp(old_jvms->sp()+1);
5442   old_jvms->set_monoff(old_jvms->monoff()+1);
5443   old_jvms->set_scloff(old_jvms->scloff()+1);
5444   old_jvms->set_endoff(old_jvms->endoff()+1);
5445   old_jvms->set_should_reexecute(true);
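  // (Editor's note: inserting one extra expression-stack slot shifts everything that
  //  follows the stack in the JVMState layout, which is why monoff/scloff/endoff all
  //  move by one along with sp.)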
5446 
5447   sfpt->set_i_o(map()->i_o());
5448   sfpt->set_memory(map()->memory());
5449   sfpt->set_control(map()->control());
5450   return sfpt;
5451 }
5452 
5453 // In case of a deoptimization, we restart execution at the
5454 // allocation, allocating a new array. We would leave an uninitialized
5455 // array in the heap that GCs wouldn't expect. Move the allocation
5456 // after the traps so we don't allocate the array if we
5457 // deoptimize. This is possible because tightly_coupled_allocation()
5458 // guarantees there's no observer of the allocated array at this point
5459 // and the control flow is simple enough.
5460 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5461                                                     int saved_reexecute_sp, uint new_idx) {
5462   if (saved_jvms_before_guards != nullptr && !stopped()) {
5463     replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5464 
5465     assert(alloc != nullptr, "only with a tightly coupled allocation");
5466     // restore JVM state to the state at the arraycopy
5467     saved_jvms_before_guards->map()->set_control(map()->control());
5468     assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5469     assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5470     // If we've improved the types of some nodes (null check) while
5471     // emitting the guards, propagate them to the current state
5472     map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5473     set_jvms(saved_jvms_before_guards);
5474     _reexecute_sp = saved_reexecute_sp;
5475 
5476     // Remove the allocation from above the guards
5477     CallProjections callprojs;
5478     alloc->extract_projections(&callprojs, true);
5479     InitializeNode* init = alloc->initialization();
5480     Node* alloc_mem = alloc->in(TypeFunc::Memory);
5481     C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5482     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5483 
5484     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5485     // the allocation (i.e. is only valid if the allocation succeeds):
5486     // 1) replace CastIINode with AllocateArrayNode's length here
5487     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5488     //
5489     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
5490     // a new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy).
5491     Node* init_control = init->proj_out(TypeFunc::Control);
5492     Node* alloc_length = alloc->Ideal_length();
5493 #ifdef ASSERT
5494     Node* prev_cast = nullptr;
5495 #endif
5496     for (uint i = 0; i < init_control->outcnt(); i++) {
5497       Node* init_out = init_control->raw_out(i);
5498       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5499 #ifdef ASSERT
5500         if (prev_cast == nullptr) {
5501           prev_cast = init_out;

5503           if (prev_cast->cmp(*init_out) == false) {
5504             prev_cast->dump();
5505             init_out->dump();
5506             assert(false, "not equal CastIINode");
5507           }
5508         }
5509 #endif
5510         C->gvn_replace_by(init_out, alloc_length);
5511       }
5512     }
5513     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5514 
5515     // move the allocation here (after the guards)
5516     _gvn.hash_delete(alloc);
5517     alloc->set_req(TypeFunc::Control, control());
5518     alloc->set_req(TypeFunc::I_O, i_o());
5519     Node *mem = reset_memory();
5520     set_all_memory(mem);
5521     alloc->set_req(TypeFunc::Memory, mem);
5522     set_control(init->proj_out_or_null(TypeFunc::Control));
5523     set_i_o(callprojs.fallthrough_ioproj);
5524 
5525     // Update memory as done in GraphKit::set_output_for_allocation()
5526     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5527     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5528     if (ary_type->isa_aryptr() && length_type != nullptr) {
5529       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5530     }
5531     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5532     int            elemidx  = C->get_alias_index(telemref);
5533     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5534     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5535 
5536     Node* allocx = _gvn.transform(alloc);
5537     assert(allocx == alloc, "where has the allocation gone?");
5538     assert(dest->is_CheckCastPP(), "not an allocation result?");
5539 
5540     _gvn.hash_delete(dest);
5541     dest->set_req(0, control());
5542     Node* destx = _gvn.transform(dest);
5543     assert(destx == dest, "where has the allocation result gone?");

5841         top_src  = src_type->isa_aryptr();
5842         has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5843         src_spec = true;
5844       }
5845       if (!has_dest) {
5846         dest = maybe_cast_profiled_obj(dest, dest_k, true);
5847         dest_type  = _gvn.type(dest);
5848         top_dest  = dest_type->isa_aryptr();
5849         has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5850         dest_spec = true;
5851       }
5852     }
5853   }
5854 
5855   if (has_src && has_dest && can_emit_guards) {
5856     BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5857     BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5858     if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5859     if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5860 
5861     if (src_elem == dest_elem && src_elem == T_OBJECT) {
5862       // If both arrays are object arrays then having the exact types
5863       // for both will remove the need for a subtype check at runtime
5864       // before the call and may make it possible to pick a faster copy
5865       // routine (without a subtype check on every element)
5866       // Do we have the exact type of src?
5867       bool could_have_src = src_spec;
5868       // Do we have the exact type of dest?
5869       bool could_have_dest = dest_spec;
5870       ciKlass* src_k = nullptr;
5871       ciKlass* dest_k = nullptr;
5872       if (!src_spec) {
5873         src_k = src_type->speculative_type_not_null();
5874         if (src_k != nullptr && src_k->is_array_klass()) {
5875           could_have_src = true;
5876         }
5877       }
5878       if (!dest_spec) {
5879         dest_k = dest_type->speculative_type_not_null();
5880         if (dest_k != nullptr && dest_k->is_array_klass()) {
5881           could_have_dest = true;
5882         }
5883       }
5884       if (could_have_src && could_have_dest) {
5885         // If we can have both exact types, emit the missing guards
5886         if (could_have_src && !src_spec) {
5887           src = maybe_cast_profiled_obj(src, src_k, true);


5888         }
5889         if (could_have_dest && !dest_spec) {
5890           dest = maybe_cast_profiled_obj(dest, dest_k, true);


5891         }
5892       }
5893     }
5894   }
5895 
5896   ciMethod* trap_method = method();
5897   int trap_bci = bci();
5898   if (saved_jvms_before_guards != nullptr) {
5899     trap_method = alloc->jvms()->method();
5900     trap_bci = alloc->jvms()->bci();
5901   }
5902 
5903   bool negative_length_guard_generated = false;
5904 
5905   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5906       can_emit_guards &&
5907       !src->is_top() && !dest->is_top()) {
5908     // validate arguments: enables transformation of the ArrayCopyNode
5909     validated = true;
5910 
5911     RegionNode* slow_region = new RegionNode(1);
5912     record_for_igvn(slow_region);
5913 
5914     // (1) src and dest are arrays.
5915     generate_non_array_guard(load_object_klass(src), slow_region);
5916     generate_non_array_guard(load_object_klass(dest), slow_region);
5917 
5918     // (2) src and dest arrays must have elements of the same BasicType
5919     // done at macro expansion or at Ideal transformation time
5920 
5921     // (4) src_offset must not be negative.
5922     generate_negative_guard(src_offset, slow_region);
5923 
5924     // (5) dest_offset must not be negative.
5925     generate_negative_guard(dest_offset, slow_region);
5926 
5927     // (7) src_offset + length must not exceed length of src.

5930                          slow_region);
5931 
5932     // (8) dest_offset + length must not exceed length of dest.
5933     generate_limit_guard(dest_offset, length,
5934                          load_array_length(dest),
5935                          slow_region);
5936 
5937     // (6) length must not be negative.
5938     // This is also checked in generate_arraycopy() during macro expansion, but
5939     // we also have to check it here for the case where the ArrayCopyNode will
5940     // be eliminated by Escape Analysis.
5941     if (EliminateAllocations) {
5942       generate_negative_guard(length, slow_region);
5943       negative_length_guard_generated = true;
5944     }
5945 
5946     // (9) each element of an oop array must be assignable
5947     Node* dest_klass = load_object_klass(dest);
5948     if (src != dest) {
5949       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);


5950 
5951       if (not_subtype_ctrl != top()) {
5952         PreserveJVMState pjvms(this);
5953         set_control(not_subtype_ctrl);
5954         uncommon_trap(Deoptimization::Reason_intrinsic,
5955                       Deoptimization::Action_make_not_entrant);
5956         assert(stopped(), "Should be stopped");






















5957       }
5958     }

5959     {
5960       PreserveJVMState pjvms(this);
5961       set_control(_gvn.transform(slow_region));
5962       uncommon_trap(Deoptimization::Reason_intrinsic,
5963                     Deoptimization::Action_make_not_entrant);
5964       assert(stopped(), "Should be stopped");
5965     }
5966 
5967     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5968     const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
5969     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5970     arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
5971   }
5972 
5973   if (stopped()) {
5974     return true;
5975   }
5976 
5977   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
5978                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
5979                                           // so the compiler has a chance to eliminate them: during macro expansion,
5980                                           // we have to set their control (CastPP nodes are eliminated).
5981                                           load_object_klass(src), load_object_klass(dest),
5982                                           load_array_length(src), load_array_length(dest));
5983 
5984   ac->set_arraycopy(validated);
5985 
5986   Node* n = _gvn.transform(ac);
5987   if (n == ac) {
5988     ac->connect_outputs(this);
5989   } else {

  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "ci/ciFlatArrayKlass.hpp"
  28 #include "ci/ciUtilities.inline.hpp"
  29 #include "classfile/vmIntrinsics.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compileLog.hpp"
  32 #include "gc/shared/barrierSet.hpp"
  33 #include "jfr/support/jfrIntrinsics.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "oops/klass.inline.hpp"
  36 #include "oops/objArrayKlass.hpp"
  37 #include "opto/addnode.hpp"
  38 #include "opto/arraycopynode.hpp"
  39 #include "opto/c2compiler.hpp"
  40 #include "opto/castnode.hpp"
  41 #include "opto/cfgnode.hpp"
  42 #include "opto/convertnode.hpp"
  43 #include "opto/countbitsnode.hpp"
  44 #include "opto/idealKit.hpp"
  45 #include "opto/library_call.hpp"
  46 #include "opto/mathexactnode.hpp"
  47 #include "opto/mulnode.hpp"

 309   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 310   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 311   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 312   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 313   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 314 
 315   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 316 
 317   case vmIntrinsics::_vectorizedHashCode:       return inline_vectorizedHashCode();
 318 
 319   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 320   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 321   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 322   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 323 
 324   case vmIntrinsics::_compressStringC:
 325   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 326   case vmIntrinsics::_inflateStringC:
 327   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 328 
 329   case vmIntrinsics::_makePrivateBuffer:        return inline_unsafe_make_private_buffer();
 330   case vmIntrinsics::_finishPrivateBuffer:      return inline_unsafe_finish_private_buffer();
 331   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 332   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 333   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 334   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 335   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 336   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 337   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 338   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 339   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);
 340   case vmIntrinsics::_getValue:                 return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false, true);
 341 
 342   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 343   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 344   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 345   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 346   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 347   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 348   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 349   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 350   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);
 351   case vmIntrinsics::_putValue:                 return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false, true);
 352 
 353   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 354   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 355   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 356   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 357   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 358   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 359   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 360   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 361   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 362 
 363   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 364   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 365   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 366   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 367   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 368   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 369   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 370   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 371   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 498                                                                                          "notifyJvmtiEnd", false, true);
 499   case vmIntrinsics::_notifyJvmtiVThreadMount:   return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
 500                                                                                          "notifyJvmtiMount", false, false);
 501   case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
 502                                                                                          "notifyJvmtiUnmount", false, false);
 503   case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
 504 #endif
 505 
 506 #ifdef JFR_HAVE_INTRINSICS
 507   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
 508   case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
 509   case vmIntrinsics::_jvm_commit:               return inline_native_jvm_commit();
 510 #endif
 511   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 512   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 513   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 514   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 515   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 516   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 517   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 518   case vmIntrinsics::_isFlatArray:              return inline_unsafe_isFlatArray();
 519   case vmIntrinsics::_setMemory:                return inline_unsafe_setMemory();
 520   case vmIntrinsics::_getLength:                return inline_native_getLength();
 521   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 522   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 523   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 524   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 525   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 526   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 527   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 528 
 529   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 530   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 531   case vmIntrinsics::_newNullRestrictedArray:   return inline_newNullRestrictedArray();
 532 
 533   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 534 
 535   case vmIntrinsics::_isInstance:
 536   case vmIntrinsics::_getModifiers:
 537   case vmIntrinsics::_isInterface:
 538   case vmIntrinsics::_isArray:
 539   case vmIntrinsics::_isPrimitive:
 540   case vmIntrinsics::_isHidden:
 541   case vmIntrinsics::_getSuperclass:
 542   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 543 
 544   case vmIntrinsics::_floatToRawIntBits:
 545   case vmIntrinsics::_floatToIntBits:
 546   case vmIntrinsics::_intBitsToFloat:
 547   case vmIntrinsics::_doubleToRawLongBits:
 548   case vmIntrinsics::_doubleToLongBits:
 549   case vmIntrinsics::_longBitsToDouble:
 550   case vmIntrinsics::_floatToFloat16:
 551   case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());

2243     case vmIntrinsics::_remainderUnsigned_l: {
2244       zero_check_long(argument(2));
2245       // Compile-time detection of a zero divisor (the exception path is always taken)
2246       if (stopped()) {
2247         return true; // keep the graph constructed so far
2248       }
2249       n = new UModLNode(control(), argument(0), argument(2));
2250       break;
2251     }
2252     default:  fatal_unexpected_iid(id);  break;
2253   }
2254   set_result(_gvn.transform(n));
2255   return true;
2256 }
2257 
2258 //----------------------------inline_unsafe_access----------------------------
2259 
2260 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2261   // Attempt to infer a sharper value type from the offset and base type.
2262   ciKlass* sharpened_klass = nullptr;
2263   bool null_free = false;
2264 
2265   // See if it is an instance field, with an object type.
2266   if (alias_type->field() != nullptr) {
2267     if (alias_type->field()->type()->is_klass()) {
2268       sharpened_klass = alias_type->field()->type()->as_klass();
2269       null_free = alias_type->field()->is_null_free();
2270     }
2271   }
2272 
2273   const TypeOopPtr* result = nullptr;
2274   // See if it is a narrow oop array.
2275   if (adr_type->isa_aryptr()) {
2276     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2277       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2278       null_free = adr_type->is_aryptr()->is_null_free();
2279       if (elem_type != nullptr && elem_type->is_loaded()) {
2280         // Sharpen the value type.
2281         result = elem_type;
2282       }
2283     }
2284   }
2285 
2286   // The sharpened class might be unloaded if there is no class loader
2287   // constraint in place.
2288   if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2289     // Sharpen the value type.
2290     result = TypeOopPtr::make_from_klass(sharpened_klass);
2291     if (null_free) {
2292       result = result->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2293     }
2294   }
2295   if (result != nullptr) {
2296 #ifndef PRODUCT
2297     if (C->print_intrinsics() || C->print_inlining()) {
2298       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2299       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2300     }
2301 #endif
2302   }
2303   return result;
2304 }
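// Editor's note (illustrative example, not from the source): for an unsafe getReference
// from a field declared as byte[] -- say String.value -- the generic T_OBJECT value type
// can be sharpened here to the field's declared array type, enabling a more precise load.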
2305 
2306 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2307   switch (kind) {
2308       case Relaxed:
2309         return MO_UNORDERED;
2310       case Opaque:
2311         return MO_RELAXED;
2312       case Acquire:
2313         return MO_ACQUIRE;
2314       case Release:
2315         return MO_RELEASE;
2316       case Volatile:
2317         return MO_SEQ_CST;
2318       default:
2319         ShouldNotReachHere();
2320         return 0;
2321   }
2322 }
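// Editor's note: inline_unsafe_access() ORs the result of this mapping into its
// decorator set, so e.g. a getIntVolatile intrinsic ends up with
// C2_UNSAFE_ACCESS | IN_HEAP | MO_SEQ_CST (sketch; IN_HEAP depends on the base).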
2323 
2324 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned, const bool is_flat) {
2325   if (callee()->is_static())  return false;  // caller must have the capability!
2326   DecoratorSet decorators = C2_UNSAFE_ACCESS;
2327   guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2328   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2329   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2330 
2331   if (is_reference_type(type)) {
2332     decorators |= ON_UNKNOWN_OOP_REF;
2333   }
2334 
2335   if (unaligned) {
2336     decorators |= C2_UNALIGNED;
2337   }
2338 
2339 #ifndef PRODUCT
2340   {
2341     ResourceMark rm;
2342     // Check the signatures.
2343     ciSignature* sig = callee()->signature();
2344 #ifdef ASSERT
2345     if (!is_store) {
2346       // Object getReference(Object base, int/long offset), etc.
2347       BasicType rtype = sig->return_type()->basic_type();
2348       assert(rtype == type, "getter must return the expected value");
2349       assert(sig->count() == 2 || (is_flat && sig->count() == 3), "oop getter has 2 or 3 arguments");
2350       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2351       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2352     } else {
2353       // void putReference(Object base, int/long offset, Object x), etc.
2354       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2355       assert(sig->count() == 3 || (is_flat && sig->count() == 4), "oop putter has 3 arguments");
2356       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2357       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2358       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2359       assert(vtype == type, "putter must accept the expected value");
2360     }
2361 #endif // ASSERT
2362  }
2363 #endif //PRODUCT
2364 
2365   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2366 
2367   Node* receiver = argument(0);  // type: oop
2368 
2369   // Build address expression.
2370   Node* heap_base_oop = top();
2371 
2372   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2373   Node* base = argument(1);  // type: oop
2374   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2375   Node* offset = argument(2);  // type: long
2376   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2377   // to be plain byte offsets, which are also the same as those accepted
2378   // by oopDesc::field_addr.
2379   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2380          "fieldOffset must be byte-scaled");
2381 
2382   ciInlineKlass* inline_klass = nullptr;
2383   if (is_flat) {
2384     const TypeInstPtr* cls = _gvn.type(argument(4))->isa_instptr();
2385     if (cls == nullptr || cls->const_oop() == nullptr) {
2386       return false;
2387     }
2388     ciType* mirror_type = cls->const_oop()->as_instance()->java_mirror_type();
2389     if (!mirror_type->is_inlinetype()) {
2390       return false;
2391     }
2392     inline_klass = mirror_type->as_inline_klass();
2393   }
2394 
2395   if (base->is_InlineType()) {
2396     InlineTypeNode* vt = base->as_InlineType();
2397     if (is_store) {
2398       if (!vt->is_allocated(&_gvn)) {
2399         return false;
2400       }
2401       base = vt->get_oop();
2402     } else {
2403       if (offset->is_Con()) {
2404         long off = find_long_con(offset, 0);
2405         ciInlineKlass* vk = vt->type()->inline_klass();
2406         if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2407           return false;
2408         }
2409 
2410         ciField* field = vk->get_non_flat_field_by_offset(off);
2411         if (field != nullptr) {
2412           BasicType bt = type2field[field->type()->basic_type()];
2413           if (bt == T_ARRAY || bt == T_NARROWOOP) {
2414             bt = T_OBJECT;
2415           }
2416           if (bt == type && (!field->is_flat() || field->type() == inline_klass)) {
2417             Node* value = vt->field_value_by_offset(off, false);
2418             if (value->is_InlineType()) {
2419               value = value->as_InlineType()->adjust_scalarization_depth(this);
2420             }
2421             set_result(value);
2422             return true;
2423           }
2424         }
2425       }
2426       {
2427         // Re-execute the unsafe access if allocation triggers deoptimization.
2428         PreserveReexecuteState preexecs(this);
2429         jvms()->set_should_reexecute(true);
2430         vt = vt->buffer(this);
2431       }
2432       base = vt->get_oop();
2433     }
2434   }
2435 
2436   // 32-bit machines ignore the high half!
2437   offset = ConvL2X(offset);
2438 
2439   // Save state and restore on bailout
2440   uint old_sp = sp();
2441   SafePointNode* old_map = clone_map();
2442 
2443   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2444   assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2445 
2446   if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2447     if (type != T_OBJECT && (inline_klass == nullptr || !inline_klass->has_object_fields())) {
2448       decorators |= IN_NATIVE; // off-heap primitive access
2449     } else {
2450       set_map(old_map);
2451       set_sp(old_sp);
2452       return false; // off-heap oop accesses are not supported
2453     }
2454   } else {
2455     heap_base_oop = base; // on-heap or mixed access
2456   }
2457 
2458   // Can base be null? Otherwise, always on-heap access.
2459   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2460 
2461   if (!can_access_non_heap) {
2462     decorators |= IN_HEAP;
2463   }
2464 
2465   Node* val = is_store ? argument(4 + (is_flat ? 1 : 0)) : nullptr;
2466 
2467   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2468   if (adr_type == TypePtr::NULL_PTR) {
2469     set_map(old_map);
2470     set_sp(old_sp);
2471     return false; // off-heap access with zero address
2472   }
2473 
2474   // Try to categorize the address.
2475   Compile::AliasType* alias_type = C->alias_type(adr_type);
2476   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2477 
2478   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2479       alias_type->adr_type() == TypeAryPtr::RANGE) {
2480     set_map(old_map);
2481     set_sp(old_sp);
2482     return false; // not supported
2483   }
2484 
2485   bool mismatched = false;
2486   BasicType bt = T_ILLEGAL;
2487   ciField* field = nullptr;
2488   if (adr_type->isa_instptr()) {
2489     const TypeInstPtr* instptr = adr_type->is_instptr();
2490     ciInstanceKlass* k = instptr->instance_klass();
2491     int off = instptr->offset();
2492     if (instptr->const_oop() != nullptr &&
2493         k == ciEnv::current()->Class_klass() &&
2494         instptr->offset() >= (k->size_helper() * wordSize)) {
2495       k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2496       field = k->get_field_by_offset(off, true);
2497     } else {
2498       field = k->get_non_flat_field_by_offset(off);
2499     }
2500     if (field != nullptr) {
2501       bt = type2field[field->type()->basic_type()];
2502     }
2503     assert(bt == alias_type->basic_type() || is_flat, "should match");
2504   } else {
2505     bt = alias_type->basic_type();
2506   }
2507 
2508   if (bt != T_ILLEGAL) {
2509     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2510     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2511       // Alias type doesn't differentiate between byte[] and boolean[].
2512       // Use address type to get the element type.
2513       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2514     }
2515     if (is_reference_type(bt, true)) {
2516       // accessing an array field with getReference is not a mismatch
2517       bt = T_OBJECT;
2518     }
2519     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2520       // Don't intrinsify mismatched object accesses
2521       set_map(old_map);
2522       set_sp(old_sp);
2523       return false;
2524     }
2525     mismatched = (bt != type);
2526   } else if (alias_type->adr_type()->isa_oopptr()) {
2527     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2528   }
2529 
2530   if (is_flat) {
2531     if (adr_type->isa_instptr()) {
2532       if (field == nullptr || field->type() != inline_klass) {
2533         mismatched = true;
2534       }
2535     } else if (adr_type->isa_aryptr()) {
2536       const Type* elem = adr_type->is_aryptr()->elem();
2537       if (!adr_type->is_flat() || elem->inline_klass() != inline_klass) {
2538         mismatched = true;
2539       }
2540     } else {
2541       mismatched = true;
2542     }
2543     if (is_store) {
2544       const Type* val_t = _gvn.type(val);
2545       if (!val_t->is_inlinetypeptr() || val_t->inline_klass() != inline_klass) {
2546         set_map(old_map);
2547         set_sp(old_sp);
2548         return false;
2549       }
2550     }
2551   }
2552 
2553   destruct_map_clone(old_map);
2554   assert(!mismatched || is_flat || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2555 
2556   if (mismatched) {
2557     decorators |= C2_MISMATCHED;
2558   }
2559 
2560   // First guess at the value type.
2561   const Type *value_type = Type::get_const_basic_type(type);
2562 
2563   // Figure out the memory ordering.
2564   decorators |= mo_decorator_for_access_kind(kind);
2565 
2566   if (!is_store) {
2567     if (type == T_OBJECT && !is_flat) {
2568       const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2569       if (tjp != nullptr) {
2570         value_type = tjp;
2571       }
2572     }
2573   }
2574 
2575   receiver = null_check(receiver);
2576   if (stopped()) {
2577     return true;
2578   }
2579   // Heap pointers get a null-check from the interpreter,
2580   // as a courtesy.  However, this is not guaranteed by Unsafe,
2581   // and it is not possible to fully distinguish unintended nulls
2582   // from intended ones in this API.
2583 
2584   if (!is_store) {
2585     Node* p = nullptr;
2586     // Try to constant fold a load from a constant field
2587 
2588     if (heap_base_oop != top() && field != nullptr && field->is_constant() && !field->is_flat() && !mismatched) {
2589       // final or stable field
2590       p = make_constant_from_field(field, heap_base_oop);
2591     }
2592 
2593     if (p == nullptr) { // Could not constant fold the load
2594       if (is_flat) {
2595         if (adr_type->isa_instptr() && !mismatched) {
2596           ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2597           int offset = adr_type->is_instptr()->offset();
2598           p = InlineTypeNode::make_from_flat(this, inline_klass, base, base, holder, offset, decorators);
2599         } else {
2600           p = InlineTypeNode::make_from_flat(this, inline_klass, base, adr, nullptr, 0, decorators);
2601         }
2602       } else {
2603         p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2604         const TypeOopPtr* ptr = value_type->make_oopptr();
2605         if (ptr != nullptr && ptr->is_inlinetypeptr()) {
2606           // Load a non-flattened inline type from memory
2607           p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass(), !ptr->maybe_null());
2608         }
2609       }
2610       // Normalize the value returned by getBoolean in the following cases
2611       if (type == T_BOOLEAN &&
2612           (mismatched ||
2613            heap_base_oop == top() ||                  // - heap_base_oop is null or
2614            (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2615                                                       //   and the unsafe access is made at a large offset
2616                                                       //   (i.e., larger than the maximum offset necessary for any
2617                                                       //   field access)
2618             ) {
2619           IdealKit ideal = IdealKit(this);
2620 #define __ ideal.
2621           IdealVariable normalized_result(ideal);
2622           __ declarations_done();
2623           __ set(normalized_result, p);
2624           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2625           __ set(normalized_result, ideal.ConI(1));
2626           ideal.end_if();
2627           final_sync(ideal);
2628           p = __ value(normalized_result);
2629 #undef __
2630       }
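      // Net effect of the IdealKit block above: p = (p != 0) ? 1 : 0, so a mismatched
      // or off-heap getBoolean can only ever observe 0 or 1. (Editor's note.)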
2631     }
2632     if (type == T_ADDRESS) {
2633       p = gvn().transform(new CastP2XNode(nullptr, p));
2634       p = ConvX2UL(p);
2635     }
2636     // The load node has the control of the preceding MemBarCPUOrder.  All
2637     // following nodes will have the control of the MemBarCPUOrder inserted at
2638     // the end of this method.  So, pushing the load onto the stack at a later
2639     // point is fine.
2640     set_result(p);
2641   } else {
2642     if (bt == T_ADDRESS) {
2643       // Repackage the long as a pointer.
2644       val = ConvL2X(val);
2645       val = gvn().transform(new CastX2PNode(val));
2646     }
2647     if (is_flat) {
2648       if (adr_type->isa_instptr() && !mismatched) {
2649         ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2650         int offset = adr_type->is_instptr()->offset();
2651         val->as_InlineType()->store_flat(this, base, base, holder, offset, decorators);
2652       } else {
2653         val->as_InlineType()->store_flat(this, base, adr, nullptr, 0, decorators);
2654       }
2655     } else {
2656       access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2657     }
2658   }
2659 
2660   if (argument(1)->is_InlineType() && is_store) {
2661     InlineTypeNode* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(argument(1))->inline_klass());
2662     value = value->make_larval(this, false);
2663     replace_in_map(argument(1), value);
2664   }
2665 
2666   return true;
2667 }
2668 
2669 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2670   Node* receiver = argument(0);
2671   Node* value = argument(1);
2672   if (!value->is_InlineType()) {
2673     return false;
2674   }
2675 
2676   receiver = null_check(receiver);
2677   if (stopped()) {
2678     return true;
2679   }
2680 
2681   set_result(value->as_InlineType()->make_larval(this, true));
2682   return true;
2683 }
2684 
2685 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2686   Node* receiver = argument(0);
2687   Node* buffer = argument(1);
2688   if (!buffer->is_InlineType()) {
2689     return false;
2690   }
2691   InlineTypeNode* vt = buffer->as_InlineType();
2692   if (!vt->is_allocated(&_gvn)) {
2693     return false;
2694   }
2695   // TODO 8239003 Why is this needed?
2696   if (AllocateNode::Ideal_allocation(vt->get_oop()) == nullptr) {
2697     return false;
2698   }
2699 
2700   receiver = null_check(receiver);
2701   if (stopped()) {
2702     return true;
2703   }
2704 
2705   set_result(vt->finish_larval(this));
2706   return true;
2707 }
2708 
2709 //----------------------------inline_unsafe_load_store----------------------------
2710 // This method serves a couple of different customers (depending on LoadStoreKind):
2711 //
2712 // LS_cmp_swap:
2713 //
2714 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2715 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2716 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2717 //
2718 // LS_cmp_swap_weak:
2719 //
2720 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2721 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2722 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2723 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2724 //
2725 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);
2726 //   boolean weakCompareAndSetIntPlain(     Object o, long offset, int    expected, int    x);
2727 //   boolean weakCompareAndSetIntAcquire(   Object o, long offset, int    expected, int    x);
2728 //   boolean weakCompareAndSetIntRelease(   Object o, long offset, int    expected, int    x);

2894     }
2895     case LS_cmp_swap:
2896     case LS_cmp_swap_weak:
2897     case LS_get_add:
2898       break;
2899     default:
2900       ShouldNotReachHere();
2901   }
2902 
2903   // Null check receiver.
2904   receiver = null_check(receiver);
2905   if (stopped()) {
2906     return true;
2907   }
2908 
2909   int alias_idx = C->get_alias_index(adr_type);
2910 
2911   if (is_reference_type(type)) {
2912     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2913 
2914     if (oldval != nullptr && oldval->is_InlineType()) {
2915       // Re-execute the unsafe access if allocation triggers deoptimization.
2916       PreserveReexecuteState preexecs(this);
2917       jvms()->set_should_reexecute(true);
2918       oldval = oldval->as_InlineType()->buffer(this)->get_oop();
2919     }
2920     if (newval != nullptr && newval->is_InlineType()) {
2921       // Re-execute the unsafe access if allocation triggers deoptimization.
2922       PreserveReexecuteState preexecs(this);
2923       jvms()->set_should_reexecute(true);
2924       newval = newval->as_InlineType()->buffer(this)->get_oop();
2925     }
2926 
2927     // Transformation of a value which could be a null pointer (CastPP #null)
2928     // could be delayed during Parse (for example, in adjust_map_after_if()).
2929     // Execute the transformation here to avoid barrier generation in such a case.
2930     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2931       newval = _gvn.makecon(TypePtr::NULL_PTR);
2932 
2933     if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2934       // Refine the value to a null constant when it is known to be null.
2935       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2936     }
2937   }
2938 
2939   Node* result = nullptr;
2940   switch (kind) {
2941     case LS_cmp_exchange: {
2942       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2943                                             oldval, newval, value_type, type, decorators);
2944       break;
2945     }
2946     case LS_cmp_swap_weak:

3093                     Deoptimization::Action_make_not_entrant);
3094     }
3095     if (stopped()) {
3096       return true;
3097     }
3098 #endif //INCLUDE_JVMTI
3099 
3100   Node* test = nullptr;
3101   if (LibraryCallKit::klass_needs_init_guard(kls)) {
3102     // Note:  The argument might still be an illegal value like
3103     // Serializable.class or Object[].class.   The runtime will handle it.
3104     // But we must make an explicit check for initialization.
3105     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3106     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3107     // can generate code to load it as an unsigned byte.
3108     Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
3109     Node* bits = intcon(InstanceKlass::fully_initialized);
3110     test = _gvn.transform(new SubINode(inst, bits));
3111     // The 'test' is non-zero if we need to take a slow path.
3112   }
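       // For an inline type klass the result is simply the (buffered) default
       // value; other klasses go through the regular instance allocation,
       // guarded by the initialization check computed above.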
3113   Node* obj = nullptr;
3114   const TypeInstKlassPtr* tkls = _gvn.type(kls)->isa_instklassptr();
3115   if (tkls != nullptr && tkls->instance_klass()->is_inlinetype()) {
3116     obj = InlineTypeNode::make_default(_gvn, tkls->instance_klass()->as_inline_klass())->buffer(this);
3117   } else {
3118     obj = new_instance(kls, test);
3119   }
3120   set_result(obj);
3121   return true;
3122 }
3123 
3124 //------------------------inline_native_time_funcs--------------
3125 // inline code for System.currentTimeMillis() and System.nanoTime()
3126 // these have the same type and signature
3127 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3128   const TypeFunc* tf = OptoRuntime::void_long_Type();
3129   const TypePtr* no_memory_effects = nullptr;
3130   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3131   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3132 #ifdef ASSERT
3133   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3134   assert(value_top == top(), "second value must be top");
3135 #endif
3136   set_result(value);
3137   return true;
3138 }
3139 

3872 
3873 //------------------------inline_native_setCurrentThread------------------
3874 bool LibraryCallKit::inline_native_setCurrentThread() {
3875   assert(C->method()->changes_current_thread(),
3876          "method changes current Thread but is not annotated ChangesCurrentThread");
3877   Node* arr = argument(1);
3878   Node* thread = _gvn.transform(new ThreadLocalNode());
3879   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3880   Node* thread_obj_handle
3881     = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3882   thread_obj_handle = _gvn.transform(thread_obj_handle);
3883   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
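       // Store the new thread oop through the handle loaded above; the handle's
       // slot lives outside the Java heap, hence the IN_NATIVE decorator.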
3884   access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3885   JFR_ONLY(extend_setCurrentThread(thread, arr);)
3886   return true;
3887 }
3888 
3889 const Type* LibraryCallKit::scopedValueCache_type() {
3890   ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3891   const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3892   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS, /* stable= */ false, /* flat= */ false, /* not_flat= */ true, /* not_null_free= */ true);
3893 
3894   // Because we create the scopedValue cache lazily we have to make the
3895   // type of the result BotPTR.
3896   bool xk = etype->klass_is_exact();
3897   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, TypeAryPtr::Offset(0));
3898   return objects_type;
3899 }
3900 
3901 Node* LibraryCallKit::scopedValueCache_helper() {
3902   Node* thread = _gvn.transform(new ThreadLocalNode());
3903   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3904   // We cannot use immutable_memory() because we might flip onto a
3905   // different carrier thread, at which point we'll need to use that
3906   // carrier thread's cache.
3907   // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3908   //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3909   return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3910 }
3911 
3912 //------------------------inline_native_scopedValueCache------------------
3913 bool LibraryCallKit::inline_native_scopedValueCache() {
3914   Node* cache_obj_handle = scopedValueCache_helper();
3915   const Type* objects_type = scopedValueCache_type();
3916   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3917 

4000   }
4001 
4002   // Result of top level CFG and Memory.
4003   RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
4004   record_for_igvn(result_rgn);
4005   PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
4006   record_for_igvn(result_mem);
4007 
4008   result_rgn->init_req(_true_path, _gvn.transform(valid_pin_count));
4009   result_rgn->init_req(_false_path, _gvn.transform(continuation_is_null));
4010   result_mem->init_req(_true_path, _gvn.transform(updated_pin_count_memory));
4011   result_mem->init_req(_false_path, _gvn.transform(input_memory_state));
4012 
4013   // Set output state.
4014   set_control(_gvn.transform(result_rgn));
4015   set_all_memory(_gvn.transform(result_mem));
4016 
4017   return true;
4018 }
4019 









4020 //-----------------------load_klass_from_mirror_common-------------------------
4021 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
4022 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
4023 // and branch to the given path on the region.
4024 // If never_see_null, take an uncommon trap on null, so we can optimistically
4025 // compile for the non-null case.
4026 // If the region is null, force never_see_null = true.
4027 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
4028                                                     bool never_see_null,
4029                                                     RegionNode* region,
4030                                                     int null_path,
4031                                                     int offset) {
4032   if (region == nullptr)  never_see_null = true;
4033   Node* p = basic_plus_adr(mirror, offset);
4034   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4035   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
4036   Node* null_ctl = top();
4037   kls = null_check_oop(kls, &null_ctl, never_see_null);
4038   if (region != nullptr) {
4039     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

4043   }
4044   return kls;
4045 }
4046 
4047 //--------------------(inline_native_Class_query helpers)---------------------
4048 // Use this for JVM_ACC_INTERFACE.
4049 // Fall through if (mods & mask) == bits, take the guard otherwise.
4050 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
4051                                                  ByteSize offset, const Type* type, BasicType bt) {
4052   // Branch around if the given klass has the given modifier bit set.
4053   // Like generate_guard, adds a new path onto the region.
4054   Node* modp = basic_plus_adr(kls, in_bytes(offset));
4055   Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
4056   Node* mask = intcon(modifier_mask);
4057   Node* bits = intcon(modifier_bits);
4058   Node* mbit = _gvn.transform(new AndINode(mods, mask));
4059   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
4060   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
4061   return generate_fair_guard(bol, region);
4062 }
4063 
4064 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
4065   return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
4066                                     Klass::access_flags_offset(), TypeInt::INT, T_INT);
4067 }
4068 
4069 // Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
4070 Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
4071   return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
4072                                     Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
4073 }
4074 
4075 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
4076   return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
4077 }
4078 
4079 //-------------------------inline_native_Class_query-------------------
4080 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
4081   const Type* return_type = TypeInt::BOOL;
4082   Node* prim_return_value = top();  // what happens if it's a primitive class?
4083   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);

4245 
4246   case vmIntrinsics::_getClassAccessFlags:
4247     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
4248     query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
4249     break;
4250 
4251   default:
4252     fatal_unexpected_iid(id);
4253     break;
4254   }
4255 
4256   // Fall-through is the normal case of a query to a real class.
4257   phi->init_req(1, query_value);
4258   region->init_req(1, control());
4259 
4260   C->set_has_split_ifs(true); // Has chance for split-if optimization
4261   set_result(region, phi);
4262   return true;
4263 }
4264 
4265 
4266 //-------------------------inline_Class_cast-------------------
4267 bool LibraryCallKit::inline_Class_cast() {
4268   Node* mirror = argument(0); // Class
4269   Node* obj    = argument(1);
4270   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4271   if (mirror_con == nullptr) {
4272     return false;  // dead path (mirror->is_top()).
4273   }
4274   if (obj == nullptr || obj->is_top()) {
4275     return false;  // dead path
4276   }
4277   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4278 
4279   // First, see if Class.cast() can be folded statically.
4280   // java_mirror_type() returns non-null for compile-time Class constants.
4281   bool is_null_free_array = false;
4282   ciType* tm = mirror_con->java_mirror_type(&is_null_free_array);
4283   if (tm != nullptr && tm->is_klass() &&
4284       tp != nullptr) {
4285     if (!tp->is_loaded()) {
4286       // Don't use intrinsic when class is not loaded.
4287       return false;
4288     } else {
4289       const TypeKlassPtr* tklass = TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces);
4290       if (is_null_free_array) {
4291         tklass = tklass->is_aryklassptr()->cast_to_null_free();
4292       }
4293       int static_res = C->static_subtype_check(tklass, tp->as_klass_type());
4294       if (static_res == Compile::SSC_always_true) {
4295         // isInstance() is true - fold the code.
4296         set_result(obj);
4297         return true;
4298       } else if (static_res == Compile::SSC_always_false) {
4299         // Don't use intrinsic, have to throw ClassCastException.
4300         // If the reference is null, the non-intrinsic bytecode will
4301         // be optimized appropriately.
4302         return false;
4303       }
4304     }
4305   }
4306 
4307   // Bailout intrinsic and do normal inlining if exception path is frequent.
4308   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4309     return false;
4310   }
4311 
4312   // Generate dynamic checks.
4313   // Class.cast() is java implementation of _checkcast bytecode.
4314   // Do checkcast (Parse::do_checkcast()) optimizations here.
4315 
4316   mirror = null_check(mirror);
4317   // If mirror is dead, only null-path is taken.
4318   if (stopped()) {
4319     return true;
4320   }
4321 
4322   // Not-subtype or the mirror's klass ptr is nullptr (in case it is a primitive).
4323   enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
4324   RegionNode* region = new RegionNode(PATH_LIMIT);
4325   record_for_igvn(region);
4326 
4327   // Now load the mirror's klass metaobject, and null-check it.
4328   // If kls is null, we have a primitive mirror and
4329   // nothing is an instance of a primitive type.
4330   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4331 
4332   Node* res = top();
4333   Node* io = i_o();
4334   Node* mem = merged_memory();
4335   if (!stopped()) {
4336 
4337     Node* bad_type_ctrl = top();
4338     // Do checkcast optimizations.
4339     res = gen_checkcast(obj, kls, &bad_type_ctrl);
4340     region->init_req(_bad_type_path, bad_type_ctrl);
4341   }
4342   if (region->in(_prim_path) != top() ||
4343       region->in(_bad_type_path) != top() ||
4344       region->in(_npe_path) != top()) {
4345     // Let Interpreter throw ClassCastException.
4346     PreserveJVMState pjvms(this);
4347     set_control(_gvn.transform(region));
4348     // Set IO and memory because gen_checkcast may override them when buffering inline types
4349     set_i_o(io);
4350     set_all_memory(mem);
4351     uncommon_trap(Deoptimization::Reason_intrinsic,
4352                   Deoptimization::Action_maybe_recompile);
4353   }
4354   if (!stopped()) {
4355     set_result(res);
4356   }
4357   return true;
4358 }
4359 
4360 
4361 //--------------------------inline_native_subtype_check------------------------
4362 // This intrinsic takes the JNI calls out of the heart of
4363 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4364 bool LibraryCallKit::inline_native_subtype_check() {
4365   // Pull both arguments off the stack.
4366   Node* args[2];                // two java.lang.Class mirrors: superc, subc
4367   args[0] = argument(0);
4368   args[1] = argument(1);
4369   Node* klasses[2];             // corresponding Klasses: superk, subk
4370   klasses[0] = klasses[1] = top();
4371 
4372   enum {
4373     // A full decision tree on {superc is prim, subc is prim}:
4374     _prim_0_path = 1,           // {P,N} => false
4375                                 // {P,P} & superc!=subc => false
4376     _prim_same_path,            // {P,P} & superc==subc => true
4377     _prim_1_path,               // {N,P} => false
4378     _ref_subtype_path,          // {N,N} & subtype check wins => true
4379     _both_ref_path,             // {N,N} & subtype check loses => false
4380     PATH_LIMIT
4381   };
4382 
4383   RegionNode* region = new RegionNode(PATH_LIMIT);
4384   RegionNode* prim_region = new RegionNode(2);
4385   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
4386   record_for_igvn(region);
4387   record_for_igvn(prim_region);
4388 
4389   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
4390   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4391   int class_klass_offset = java_lang_Class::klass_offset();
4392 
4393   // First null-check both mirrors and load each mirror's klass metaobject.
4394   int which_arg;
4395   for (which_arg = 0; which_arg <= 1; which_arg++) {
4396     Node* arg = args[which_arg];
4397     arg = null_check(arg);
4398     if (stopped())  break;
4399     args[which_arg] = arg;
4400 
4401     Node* p = basic_plus_adr(arg, class_klass_offset);
4402     Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
4403     klasses[which_arg] = _gvn.transform(kls);
4404   }
4405 
4406   // Having loaded both klasses, test each for null.
4407   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4408   for (which_arg = 0; which_arg <= 1; which_arg++) {
4409     Node* kls = klasses[which_arg];
4410     Node* null_ctl = top();
4411     kls = null_check_oop(kls, &null_ctl, never_see_null);
4412     if (which_arg == 0) {
4413       prim_region->init_req(1, null_ctl);
4414     } else {
4415       region->init_req(_prim_1_path, null_ctl);
4416     }
4417     if (stopped())  break;
4418     klasses[which_arg] = kls;
4419   }
4420 
4421   if (!stopped()) {
4422     // now we have two reference types, in klasses[0..1]
4423     Node* subk   = klasses[1];  // the argument to isAssignableFrom
4424     Node* superk = klasses[0];  // the receiver
4425     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));

4426     region->set_req(_ref_subtype_path, control());
4427   }
4428 
4429   // If both operands are primitive (both klasses null), then
4430   // we must return true when they are identical primitives.
4431   // It is convenient to test this after the first null klass check.
4432   // This path is also used if superc is a value mirror.
4433   set_control(_gvn.transform(prim_region));
4434   if (!stopped()) {
4435     // Since superc is primitive, make a guard for the superc==subc case.
4436     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4437     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4438     generate_fair_guard(bol_eq, region);
4439     if (region->req() == PATH_LIMIT+1) {
4440       // A guard was added.  If the added guard is taken, superc==subc.
4441       region->swap_edges(PATH_LIMIT, _prim_same_path);
4442       region->del_req(PATH_LIMIT);
4443     }
4444     region->set_req(_prim_0_path, control()); // Not equal after all.
4445   }
4446 
4447   // these are the only paths that produce 'true':
4448   phi->set_req(_prim_same_path,   intcon(1));
4449   phi->set_req(_ref_subtype_path, intcon(1));
4450 
4451   // pull together the cases:
4452   assert(region->req() == PATH_LIMIT, "sane region");
4453   for (uint i = 1; i < region->req(); i++) {
4454     Node* ctl = region->in(i);
4455     if (ctl == nullptr || ctl == top()) {
4456       region->set_req(i, top());
4457       phi   ->set_req(i, top());
4458     } else if (phi->in(i) == nullptr) {
4459       phi->set_req(i, intcon(0)); // all other paths produce 'false'
4460     }
4461   }
4462 
4463   set_control(_gvn.transform(region));
4464   set_result(_gvn.transform(phi));
4465   return true;
4466 }
4467 
4468 //---------------------generate_array_guard_common------------------------
4469 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {

4470 
4471   if (stopped()) {
4472     return nullptr;
4473   }
4474 









4475   // Like generate_guard, adds a new path onto the region.
4476   jint  layout_con = 0;
4477   Node* layout_val = get_layout_helper(kls, layout_con);
4478   if (layout_val == nullptr) {
4479     bool query = false;
4480     switch(kind) {
4481       case ObjectArray:    query = Klass::layout_helper_is_objArray(layout_con); break;
4482       case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
4483       case TypeArray:      query = Klass::layout_helper_is_typeArray(layout_con); break;
4484       case AnyArray:       query = Klass::layout_helper_is_array(layout_con); break;
4485       case NonArray:       query = !Klass::layout_helper_is_array(layout_con); break;
4486       default:
4487         ShouldNotReachHere();
4488     }
4489     if (!query) {
4490       return nullptr;                       // never a branch
4491     } else {                             // always a branch
4492       Node* always_branch = control();
4493       if (region != nullptr)
4494         region->add_req(always_branch);
4495       set_control(top());
4496       return always_branch;
4497     }
4498   }
4499   unsigned int value = 0;
4500   BoolTest::mask btest = BoolTest::illegal;
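       // Arrays have a negative layout helper whose high bits encode the array
       // tag, while instance klasses have a positive one. Pick the constant and
       // comparison that match the requested guard kind.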
4501   switch(kind) {
4502     case ObjectArray:
4503     case NonObjectArray: {
4504       value = Klass::_lh_array_tag_obj_value;
4505       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4506       btest = (kind == ObjectArray) ? BoolTest::eq : BoolTest::ne;
4507       break;
4508     }
4509     case TypeArray: {
4510       value = Klass::_lh_array_tag_type_value;
4511       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4512       btest = BoolTest::eq;
4513       break;
4514     }
4515     case AnyArray:    value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
4516     case NonArray:    value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
4517     default:
4518       ShouldNotReachHere();
4519   }
4520   // Now test the correct condition.
4521   jint nval = (jint)value;



4522   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));



4523   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4524   return generate_fair_guard(bol, region);
4525 }
4526 
4527 //-----------------------inline_newNullRestrictedArray--------------------------
4528 // public static native Object[] newNullRestrictedArray(Class<?> componentType, int length);
4529 bool LibraryCallKit::inline_newNullRestrictedArray() {
4530   Node* componentType = argument(0);
4531   Node* length = argument(1);
4532 
4533   const TypeInstPtr* tp = _gvn.type(componentType)->isa_instptr();
4534   if (tp != nullptr) {
4535     ciInstanceKlass* ik = tp->instance_klass();
4536     if (ik == C->env()->Class_klass()) {
4537       ciType* t = tp->java_mirror_type();
4538       if (t != nullptr && t->is_inlinetype()) {
4539         ciArrayKlass* array_klass = ciArrayKlass::make(t, true);
4540         if (array_klass->is_loaded() && array_klass->element_klass()->as_inline_klass()->is_initialized()) {
4541           const TypeAryKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass, Type::trust_interfaces)->is_aryklassptr();
4542           array_klass_type = array_klass_type->cast_to_null_free();
4543           Node* obj = new_array(makecon(array_klass_type), length, 0, nullptr, false);  // no arguments to push
4544           set_result(obj);
4545           assert(gvn().type(obj)->is_aryptr()->is_null_free(), "must be null-free");
4546           return true;
4547         }
4548       }
4549     }
4550   }
4551   return false;
4552 }
4553 
4554 //-----------------------inline_native_newArray--------------------------
4555 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4556 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4557 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4558   Node* mirror;
4559   Node* count_val;
4560   if (uninitialized) {
4561     null_check_receiver();
4562     mirror    = argument(1);
4563     count_val = argument(2);
4564   } else {
4565     mirror    = argument(0);
4566     count_val = argument(1);
4567   }
4568 
4569   mirror = null_check(mirror);
4570   // If mirror or obj is dead, only null-path is taken.
4571   if (stopped())  return true;
4572 
4573   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4574   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4575   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);

4681   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4682   { PreserveReexecuteState preexecs(this);
4683     jvms()->set_should_reexecute(true);
4684 
4685     array_type_mirror = null_check(array_type_mirror);
4686     original          = null_check(original);
4687 
4688     // Check if a null path was taken unconditionally.
4689     if (stopped())  return true;
4690 
4691     Node* orig_length = load_array_length(original);
4692 
4693     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4694     klass_node = null_check(klass_node);
4695 
4696     RegionNode* bailout = new RegionNode(1);
4697     record_for_igvn(bailout);
4698 
4699     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4700     // Bail out if that is so.
4701     // An inline type array may have an object field that would require a
4702     // write barrier. Conservatively, go to the slow path.
4703     // TODO 8251971: Optimize for the case when flat src/dst are later found
4704     // to not contain oops (i.e., move this check to the macro expansion phase).
4705     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4706     const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
4707     const TypeKlassPtr* tklass = _gvn.type(klass_node)->is_klassptr();
4708     bool exclude_flat = UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
4709                         // Can src array be flat and contain oops?
4710                         (orig_t == nullptr || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
4711                         // Can dest array be flat and contain oops?
4712                         tklass->can_be_inline_array() && (!tklass->is_flat() || tklass->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->as_inline_klass()->contains_oops());
4713     Node* not_objArray = exclude_flat ? generate_non_objArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
4714     if (not_objArray != nullptr) {
4715       // Improve the klass node's type from the new optimistic assumption:
4716       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4717       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
4718       Node* cast = new CastPPNode(control(), klass_node, akls);
4719       klass_node = _gvn.transform(cast);
4720     }
4721 
4722     // Bail out if either start or end is negative.
4723     generate_negative_guard(start, bailout, &start);
4724     generate_negative_guard(end,   bailout, &end);
4725 
4726     Node* length = end;
4727     if (_gvn.type(start) != TypeInt::ZERO) {
4728       length = _gvn.transform(new SubINode(end, start));
4729     }
4730 
4731     // Bail out if length is negative (i.e., if start > end).
4732     // Without this, new_array would throw
4733     // NegativeArraySizeException, but IllegalArgumentException is what
4734     // should be thrown.
4735     generate_negative_guard(length, bailout, &length);
4736 
4737     // Handle inline type arrays
4738     bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
4739     if (!stopped()) {
4740       // TODO JDK-8329224
4741       if (!orig_t->is_null_free()) {
4742         // Not statically known to be null free, add a check
4743         generate_fair_guard(null_free_array_test(original), bailout);
4744       }
4745       orig_t = _gvn.type(original)->isa_aryptr();
4746       if (orig_t != nullptr && orig_t->is_flat()) {
4747         // Src is flat, check that dest is flat as well
4748         if (exclude_flat) {
4749           // Dest can't be flat, bail out
4750           bailout->add_req(control());
4751           set_control(top());
4752         } else {
4753           generate_fair_guard(flat_array_test(klass_node, /* flat = */ false), bailout);
4754         }
4755       } else if (UseFlatArray && (orig_t == nullptr || !orig_t->is_not_flat()) &&
4756                  // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
4757                  ((!tklass->is_flat() && tklass->can_be_inline_array()) || !can_validate)) {
4758         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
4759         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
4760         generate_fair_guard(flat_array_test(load_object_klass(original)), bailout);
4761         if (orig_t != nullptr) {
4762           orig_t = orig_t->cast_to_not_flat();
4763           original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
4764         }
4765       }
4766       if (!can_validate) {
4767         // No validation. The subtype check emitted at macro expansion time will not go to the slow
4768         // path but call checkcast_arraycopy which cannot handle flat/null-free inline type arrays.
4769         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
4770         generate_fair_guard(flat_array_test(klass_node), bailout);
4771         generate_fair_guard(null_free_array_test(original), bailout);
4772       }
4773     }
4774 
4775     // Bail out if start is larger than the original length
4776     Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4777     generate_negative_guard(orig_tail, bailout, &orig_tail);
4778 
4779     if (bailout->req() > 1) {
4780       PreserveJVMState pjvms(this);
4781       set_control(_gvn.transform(bailout));
4782       uncommon_trap(Deoptimization::Reason_intrinsic,
4783                     Deoptimization::Action_maybe_recompile);
4784     }
4785 
4786     if (!stopped()) {
4787       // How many elements will we copy from the original?
4788       // The answer is MinI(orig_tail, length).
4789       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4790 
4791       // Generate a direct call to the right arraycopy function(s).
4792       // We know the copy is disjoint but we might not know if the
4793       // oop stores need checking.
4794       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).

4800       // to the copyOf to be validated, including that the copy to the
4801       // new array won't trigger an ArrayStoreException. That subtype
4802       // check can be optimized if we know something on the type of
4803       // the input array from type speculation.
4804       if (_gvn.type(klass_node)->singleton()) {
4805         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4806         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4807 
4808         int test = C->static_subtype_check(superk, subk);
4809         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4810           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4811           if (t_original->speculative_type() != nullptr) {
4812             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4813           }
4814         }
4815       }
4816 
4817       bool validated = false;
4818       // Reason_class_check rather than Reason_intrinsic because we
4819       // want to intrinsify even if this traps.
4820       if (can_validate) {
4821         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4822 
4823         if (not_subtype_ctrl != top()) {
4824           PreserveJVMState pjvms(this);
4825           set_control(not_subtype_ctrl);
4826           uncommon_trap(Deoptimization::Reason_class_check,
4827                         Deoptimization::Action_make_not_entrant);
4828           assert(stopped(), "Should be stopped");
4829         }
4830         validated = true;
4831       }
4832 
4833       if (!stopped()) {
4834         newcopy = new_array(klass_node, length, 0);  // no arguments to push
4835 
4836         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
4837                                                 load_object_klass(original), klass_node);
4838         if (!is_copyOfRange) {
4839           ac->set_copyof(validated);
4840         } else {

4886 
4887 //-----------------------generate_method_call----------------------------
4888 // Use generate_method_call to make a slow-call to the real
4889 // method if the fast path fails.  An alternative would be to
4890 // use a stub like OptoRuntime::slow_arraycopy_Java.
4891 // This only works for expanding the current library call,
4892 // not another intrinsic.  (E.g., don't use this for making an
4893 // arraycopy call inside of the copyOf intrinsic.)
4894 CallJavaNode*
4895 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4896   // When compiling the intrinsic method itself, do not use this technique.
4897   guarantee(callee() != C->method(), "cannot make slow-call to self");
4898 
4899   ciMethod* method = callee();
4900   // ensure the JVMS we have will be correct for this call
4901   guarantee(method_id == method->intrinsic_id(), "must match");
4902 
4903   const TypeFunc* tf = TypeFunc::make(method);
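       // If the caller guarantees a non-null result, tighten the declared return
       // type of the slow call so downstream code need not re-check for null.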
4904   if (res_not_null) {
4905     assert(tf->return_type() == T_OBJECT, "");
4906     const TypeTuple* range = tf->range_cc();
4907     const Type** fields = TypeTuple::fields(range->cnt());
4908     fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4909     const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4910     tf = TypeFunc::make(tf->domain_cc(), new_range);
4911   }
4912   CallJavaNode* slow_call;
4913   if (is_static) {
4914     assert(!is_virtual, "");
4915     slow_call = new CallStaticJavaNode(C, tf,
4916                            SharedRuntime::get_resolve_static_call_stub(), method);
4917   } else if (is_virtual) {
4918     assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4919     int vtable_index = Method::invalid_vtable_index;
4920     if (UseInlineCaches) {
4921       // Suppress the vtable call
4922     } else {
4923       // hashCode and clone are not miranda methods,
4924       // so the vtable index is fixed.
4925       // No need to use the linkResolver to get it.
4926        vtable_index = method->vtable_index();
4927        assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4928               "bad index %d", vtable_index);
4929     }
4930     slow_call = new CallDynamicJavaNode(tf,

4947   set_edges_for_java_call(slow_call);
4948   return slow_call;
4949 }
4950 
4951 
4952 /**
4953  * Build special case code for calls to hashCode on an object. This call may
4954  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4955  * slightly different code.
4956  */
4957 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4958   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4959   assert(!(is_virtual && is_static), "either virtual, special, or static");
4960 
4961   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4962 
4963   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4964   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4965   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4966   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4967   Node* obj = argument(0);
4968 
4969   // Don't intrinsify hashCode on inline types for now.
4970   // The "is locked" runtime check below also serves as an inline type check and goes to the slow path.
4971   if (gvn().type(obj)->is_inlinetypeptr()) {
4972     return false;
4973   }
4974 
4975   if (!is_static) {
4976     // Check for hashing null object
4977     obj = null_check_receiver();
4978     if (stopped())  return true;        // unconditionally null
4979     result_reg->init_req(_null_path, top());
4980     result_val->init_req(_null_path, top());
4981   } else {
4982     // Do a null check, and return zero if null.
4983     // System.identityHashCode(null) == 0

4984     Node* null_ctl = top();
4985     obj = null_check_oop(obj, &null_ctl);
4986     result_reg->init_req(_null_path, null_ctl);
4987     result_val->init_req(_null_path, _gvn.intcon(0));
4988   }
4989 
4990   // Unconditionally null?  Then return right away.
4991   if (stopped()) {
4992     set_control( result_reg->in(_null_path));
4993     if (!stopped())
4994       set_result(result_val->in(_null_path));
4995     return true;
4996   }
4997 
4998   // We only go to the fast case code if we pass a number of guards.  The
4999   // paths which do not pass are accumulated in the slow_region.
5000   RegionNode* slow_region = new RegionNode(1);
5001   record_for_igvn(slow_region);
5002 
5003   // If this is a virtual call, we generate a funny guard.  We pull out
5004   // the vtable entry corresponding to hashCode() from the target object.
5005   // If the target method which we are calling happens to be the native
5006   // Object hashCode() method, we pass the guard.  We do not need this
5007   // guard for non-virtual calls -- the caller is known to be the native
5008   // Object hashCode().
5009   if (is_virtual) {
5010     // After null check, get the object's klass.
5011     Node* obj_klass = load_object_klass(obj);
5012     generate_virtual_guard(obj_klass, slow_region);
5013   }
5014 
5015   // Get the header out of the object, use LoadMarkNode when available
5016   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
5017   // The control of the load must be null. Otherwise, the load can move before
5018   // the null check after castPP removal.
5019   Node* no_ctrl = nullptr;
5020   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
5021 
5022   if (!UseObjectMonitorTable) {
5023     // Test the header to see if it is safe to read w.r.t. locking.
5024     // This also serves as a guard against inline types.
5025     Node *lock_mask      = _gvn.MakeConX(markWord::inline_type_mask_in_place);
5026     Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
5027     if (LockingMode == LM_LIGHTWEIGHT) {
5028       Node *monitor_val   = _gvn.MakeConX(markWord::monitor_value);
5029       Node *chk_monitor   = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
5030       Node *test_monitor  = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
5031 
5032       generate_slow_guard(test_monitor, slow_region);
5033     } else {
5034       Node *unlocked_val      = _gvn.MakeConX(markWord::unlocked_value);
5035       Node *chk_unlocked      = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
5036       Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
5037 
5038       generate_slow_guard(test_not_unlocked, slow_region);
5039     }
5040   }
5041 
5042   // Get the hash value and check to see that it has been properly assigned.
5043   // We depend on hash_mask being at most 32 bits and avoid the use of
5044   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
5045   // vm: see markWord.hpp.

5080     // this->control() comes from set_results_for_java_call
5081     result_reg->init_req(_slow_path, control());
5082     result_val->init_req(_slow_path, slow_result);
5083     result_io  ->set_req(_slow_path, i_o());
5084     result_mem ->set_req(_slow_path, reset_memory());
5085   }
5086 
5087   // Return the combined state.
5088   set_i_o(        _gvn.transform(result_io)  );
5089   set_all_memory( _gvn.transform(result_mem));
5090 
5091   set_result(result_reg, result_val);
5092   return true;
5093 }
5094 
5095 //---------------------------inline_native_getClass----------------------------
5096 // public final native Class<?> java.lang.Object.getClass();
5097 //
5098 // Build special case code for calls to getClass on an object.
5099 bool LibraryCallKit::inline_native_getClass() {
5100   Node* obj = argument(0);
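       // For a scalarized inline type the class is statically known: null-check
       // only if the value might be null and return the constant mirror.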
5101   if (obj->is_InlineType()) {
5102     const Type* t = _gvn.type(obj);
5103     if (t->maybe_null()) {
5104       null_check(obj);
5105     }
5106     set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
5107     return true;
5108   }
5109   obj = null_check_receiver();
5110   if (stopped())  return true;
5111   set_result(load_mirror_from_klass(load_object_klass(obj)));
5112   return true;
5113 }
5114 
5115 //-----------------inline_native_Reflection_getCallerClass---------------------
5116 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
5117 //
5118 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
5119 //
5120 // NOTE: This code must perform the same logic as JVM_GetCallerClass
5121 // in that it must skip particular security frames and checks for
5122 // caller sensitive methods.
5123 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
5124 #ifndef PRODUCT
5125   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
5126     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
5127   }
5128 #endif
5129 

5441     dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5442 
5443     flags |= RC_NARROW_MEM; // narrow in memory
5444   }
5445 
5446   // Call it.  Note that the length argument is not scaled.
5447   make_runtime_call(flags,
5448                     OptoRuntime::make_setmemory_Type(),
5449                     StubRoutines::unsafe_setmemory(),
5450                     "unsafe_setmemory",
5451                     dst_type,
5452                     dst_addr, size XTOP, byte);
5453 
5454   store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
5455 
5456   return true;
5457 }
5458 
5459 #undef XTOP
5460 
5461 //----------------------inline_unsafe_isFlatArray------------------------
5462 // public native boolean Unsafe.isFlatArray(Class<?> arrayClass);
5463 // This intrinsic exploits assumptions made by the native implementation
5464 // (arrayClass is neither null nor primitive) to avoid unnecessary null checks.
5465 bool LibraryCallKit::inline_unsafe_isFlatArray() {
5466   Node* cls = argument(1);
5467   Node* p = basic_plus_adr(cls, java_lang_Class::klass_offset());
5468   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p,
5469                                                  TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT));
5470   Node* result = flat_array_test(kls);
5471   set_result(result);
5472   return true;
5473 }
5474 
5475 //------------------------copy_to_clone-----------------------------------
5476 // Helper function for inline_native_clone.
5477 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5478   assert(obj_size != nullptr, "");
5479   Node* raw_obj = alloc_obj->in(1);
5480   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5481 
5482   AllocateNode* alloc = nullptr;
5483   if (ReduceBulkZeroing &&
5484       // If we are implementing an array clone without knowing its source type
5485       // (can happen when compiling the array-guarded branch of a reflective
5486       // Object.clone() invocation), initialize the array within the allocation.
5487       // This is needed because some GCs (e.g. ZGC) might fall back in this case
5488       // to a runtime clone call that assumes fully initialized source arrays.
5489       (!is_array || obj->get_ptr_type()->isa_aryptr() != nullptr)) {
5490     // We will be completely responsible for initializing this object -
5491     // mark Initialize node as complete.
5492     alloc = AllocateNode::Ideal_allocation(alloc_obj);
5493     // The object was just allocated - there should not be any stores!
5494     guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");

5525 //  not cloneable or finalizer => slow path to out-of-line Object.clone
5526 //
5527 // The general case has two steps, allocation and copying.
5528 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5529 //
5530 // Copying also has two cases, oop arrays and everything else.
5531 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5532 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5533 //
5534 // These steps fold up nicely if and when the cloned object's klass
5535 // can be sharply typed as an object array, a type array, or an instance.
5536 //
5537 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5538   PhiNode* result_val;
5539 
5540   // Set the reexecute bit for the interpreter to reexecute
5541   // the bytecode that invokes Object.clone if deoptimization happens.
5542   { PreserveReexecuteState preexecs(this);
5543     jvms()->set_should_reexecute(true);
5544 
5545     Node* obj = argument(0);
5546     obj = null_check_receiver();
5547     if (stopped())  return true;
5548 
5549     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5550     if (obj_type->is_inlinetypeptr()) {
5551       // If the object to clone is an inline type, we can simply return it (i.e. a nop) since inline types have
5552       // no identity.
5553       set_result(obj);
5554       return true;
5555     }
5556 
5557     // If we are going to clone an instance, we need its exact type to
5558     // know the number and types of fields to convert the clone to
5559     // loads/stores. Maybe a speculative type can help us.
5560     if (!obj_type->klass_is_exact() &&
5561         obj_type->speculative_type() != nullptr &&
5562         obj_type->speculative_type()->is_instance_klass() &&
5563         !obj_type->speculative_type()->is_inlinetype()) {
5564       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5565       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5566           !spec_ik->has_injected_fields()) {
5567         if (!obj_type->isa_instptr() ||
5568             obj_type->is_instptr()->instance_klass()->has_subklass()) {
5569           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5570         }
5571       }
5572     }
5573 
5574     // Conservatively insert a memory barrier on all memory slices.
5575     // Do not let writes into the original float below the clone.
5576     insert_mem_bar(Op_MemBarCPUOrder);
5577 
5578     // paths into result_reg:
5579     enum {
5580       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
5581       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
5582       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
5583       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
5584       PATH_LIMIT
5585     };
5586     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5587     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5588     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
5589     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5590     record_for_igvn(result_reg);
5591 
5592     Node* obj_klass = load_object_klass(obj);
5593     // We only go to the fast case code if we pass a number of guards.
5594     // The paths which do not pass are accumulated in the slow_region.
5595     RegionNode* slow_region = new RegionNode(1);
5596     record_for_igvn(slow_region);
5597 
5598     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
5599     if (array_ctl != nullptr) {
5600       // It's an array.
5601       PreserveJVMState pjvms(this);
5602       set_control(array_ctl);



5603 
5604       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5605       const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
5606       if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
5607           obj_type->can_be_inline_array() &&
5608           (ary_ptr == nullptr || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
5609         // A flat inline type array may have an object field that would require a
5610         // write barrier. Conservatively, go to the slow path.
5611         generate_fair_guard(flat_array_test(obj_klass), slow_region);













5612       }







5613 
5614       if (!stopped()) {
5615         Node* obj_length = load_array_length(obj);
5616         Node* array_size = nullptr; // Size of the array without object alignment padding.
5617         Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5618 
5619         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5620         if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5621           // If it is an oop array, it requires very special treatment,
5622           // because gc barriers are required when accessing the array.
5623           Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5624           if (is_obja != nullptr) {
5625             PreserveJVMState pjvms2(this);
5626             set_control(is_obja);
5627             // Generate a direct call to the right arraycopy function(s).
5628             // Clones are always tightly coupled.
5629             ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5630             ac->set_clone_oop_array();
5631             Node* n = _gvn.transform(ac);
5632             assert(n == ac, "cannot disappear");
5633             ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5634 
5635             result_reg->init_req(_objArray_path, control());
5636             result_val->init_req(_objArray_path, alloc_obj);
5637             result_i_o ->set_req(_objArray_path, i_o());
5638             result_mem ->set_req(_objArray_path, reset_memory());
5639           }
5640         }
5641         // Otherwise, there are no barriers to worry about.
5642         // (We can dispense with card marks if we know the allocation
5643         //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
5644         //  causes the non-eden paths to take compensating steps to
5645         //  simulate a fresh allocation, so that no further
5646         //  card marks are required in compiled code to initialize
5647         //  the object.)
5648 
5649         if (!stopped()) {
5650           copy_to_clone(obj, alloc_obj, array_size, true);
5651 
5652           // Present the results of the copy.
5653           result_reg->init_req(_array_path, control());
5654           result_val->init_req(_array_path, alloc_obj);
5655           result_i_o ->set_req(_array_path, i_o());
5656           result_mem ->set_req(_array_path, reset_memory());
5657         }
5658       }
5659     }
5660 




5661     if (!stopped()) {
5662       // It's an instance (we did array above).  Make the slow-path tests.
5663       // If this is a virtual call, we generate a funny guard.  We grab
5664       // the vtable entry corresponding to clone() from the target object.
5665       // If the target method which we are calling happens to be the
5666       // Object clone() method, we pass the guard.  We do not need this
5667       // guard for non-virtual calls; the caller is known to be the native
5668       // Object clone().
5669       if (is_virtual) {
5670         generate_virtual_guard(obj_klass, slow_region);
5671       }
5672 
5673       // The object must be easily cloneable and must not have a finalizer.
5674       // Both of these conditions may be checked in a single test.
5675       // We could optimize the test further, but we don't care.
5676       generate_misc_flags_guard(obj_klass,
5677                                 // Test both conditions:
5678                                 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
5679                                 // Must be cloneable but not finalizer:
5680                                 KlassFlags::_misc_is_cloneable_fast,

5772         set_jvms(sfpt->jvms());
5773         _reexecute_sp = jvms()->sp();
5774 
5775         return saved_jvms;
5776       }
5777     }
5778   }
5779   return nullptr;
5780 }
5781 
5782 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5783 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5784 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5785   JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5786   uint size = alloc->req();
5787   SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5788   old_jvms->set_map(sfpt);
5789   for (uint i = 0; i < size; i++) {
5790     sfpt->init_req(i, alloc->in(i));
5791   }
5792   int adjustment = 1;
5793   const TypeAryKlassPtr* ary_klass_ptr = alloc->in(AllocateNode::KlassNode)->bottom_type()->is_aryklassptr();
5794   if (ary_klass_ptr->is_null_free()) {
5795     // A null-free, tightly coupled array allocation can only come from LibraryCallKit::inline_newNullRestrictedArray
5796     // which requires both the component type and the array length on stack for re-execution. Re-create and push
5797     // the component type.
5798     ciArrayKlass* klass = ary_klass_ptr->exact_klass()->as_array_klass();
5799     ciInstance* instance = klass->component_mirror_instance();
5800     const TypeInstPtr* t_instance = TypeInstPtr::make(instance);
5801     sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), makecon(t_instance));
5802     adjustment++;
5803   }
5804   // re-push array length for deoptimization
5805   sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment - 1, alloc->in(AllocateNode::ALength));
5806   old_jvms->set_sp(old_jvms->sp() + adjustment);
5807   old_jvms->set_monoff(old_jvms->monoff() + adjustment);
5808   old_jvms->set_scloff(old_jvms->scloff() + adjustment);
5809   old_jvms->set_endoff(old_jvms->endoff() + adjustment);
5810   old_jvms->set_should_reexecute(true);
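       // Note: slots were inserted into the expression-stack region above, so the monitor,
       // scalar-replaced-object and end offsets shift by the same amount. should_reexecute lets a
       // deoptimization at this safepoint re-execute the allocation bytecode in the interpreter.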
5811 
5812   sfpt->set_i_o(map()->i_o());
5813   sfpt->set_memory(map()->memory());
5814   sfpt->set_control(map()->control());
5815   return sfpt;
5816 }
5817 
5818 // In case of a deoptimization, we restart execution at the
5819 // allocation, allocating a new array. We would leave an uninitialized
5820 // array in the heap that GCs wouldn't expect. Move the allocation
5821 // after the traps so we don't allocate the array if we
5822 // deoptimize. This is possible because tightly_coupled_allocation()
5823 // guarantees there's no observer of the allocated array at this point
5824 // and the control flow is simple enough.
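     // An illustrative (hypothetical) Java-level shape of the pattern handled here:
     //   Object[] dst = new Object[n];             // tightly coupled allocation
     //   System.arraycopy(src, srcPos, dst, 0, n); // no observer of 'dst' in between
     // If one of the guards emitted below deoptimizes, execution restarts at the (moved) allocation.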
5825 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5826                                                     int saved_reexecute_sp, uint new_idx) {
5827   if (saved_jvms_before_guards != nullptr && !stopped()) {
5828     replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5829 
5830     assert(alloc != nullptr, "only with a tightly coupled allocation");
5831     // restore JVM state to the state at the arraycopy
5832     saved_jvms_before_guards->map()->set_control(map()->control());
5833     assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5834     assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5835     // If we've improved the types of some nodes (null check) while
5836     // emitting the guards, propagate them to the current state
5837     map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5838     set_jvms(saved_jvms_before_guards);
5839     _reexecute_sp = saved_reexecute_sp;
5840 
5841     // Remove the allocation from above the guards
5842     CallProjections* callprojs = alloc->extract_projections(true);

5843     InitializeNode* init = alloc->initialization();
5844     Node* alloc_mem = alloc->in(TypeFunc::Memory);
5845     C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5846     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
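         // Note: uses of the allocation's I_O projection and of the Initialize node's memory projection
         // are redirected to the allocation's own inputs, effectively bypassing it at its old position.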
5847 
5848     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5849     // the allocation (i.e. is only valid if the allocation succeeds):
5850     // 1) replace the CastIINode with the AllocateArrayNode's length here
5851     // 2) create the CastIINode again at the end of this method, once the allocation has moved (see below)
5852     //
5853     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call generates a
5854     // new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy).
5855     Node* init_control = init->proj_out(TypeFunc::Control);
5856     Node* alloc_length = alloc->Ideal_length();
5857 #ifdef ASSERT
5858     Node* prev_cast = nullptr;
5859 #endif
5860     for (uint i = 0; i < init_control->outcnt(); i++) {
5861       Node* init_out = init_control->raw_out(i);
5862       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5863 #ifdef ASSERT
5864         if (prev_cast == nullptr) {
5865           prev_cast = init_out;

5867           if (prev_cast->cmp(*init_out) == false) {
5868             prev_cast->dump();
5869             init_out->dump();
5870             assert(false, "not equal CastIINode");
5871           }
5872         }
5873 #endif
5874         C->gvn_replace_by(init_out, alloc_length);
5875       }
5876     }
5877     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5878 
5879     // move the allocation here (after the guards)
5880     _gvn.hash_delete(alloc);
5881     alloc->set_req(TypeFunc::Control, control());
5882     alloc->set_req(TypeFunc::I_O, i_o());
5883     Node *mem = reset_memory();
5884     set_all_memory(mem);
5885     alloc->set_req(TypeFunc::Memory, mem);
5886     set_control(init->proj_out_or_null(TypeFunc::Control));
5887     set_i_o(callprojs->fallthrough_ioproj);
5888 
5889     // Update memory as done in GraphKit::set_output_for_allocation()
5890     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5891     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5892     if (ary_type->isa_aryptr() && length_type != nullptr) {
5893       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5894     }
5895     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5896     int            elemidx  = C->get_alias_index(telemref);
5897     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5898     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
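         // Note: the raw slice and the array-element alias slice both flow through the Initialize
         // node's memory projection again, so memory uses emitted after this point depend on the
         // moved allocation.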
5899 
5900     Node* allocx = _gvn.transform(alloc);
5901     assert(allocx == alloc, "where has the allocation gone?");
5902     assert(dest->is_CheckCastPP(), "not an allocation result?");
5903 
5904     _gvn.hash_delete(dest);
5905     dest->set_req(0, control());
5906     Node* destx = _gvn.transform(dest);
5907     assert(destx == dest, "where has the allocation result gone?");

6205         top_src  = src_type->isa_aryptr();
6206         has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
6207         src_spec = true;
6208       }
6209       if (!has_dest) {
6210         dest = maybe_cast_profiled_obj(dest, dest_k, true);
6211         dest_type  = _gvn.type(dest);
6212         top_dest  = dest_type->isa_aryptr();
6213         has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
6214         dest_spec = true;
6215       }
6216     }
6217   }
6218 
6219   if (has_src && has_dest && can_emit_guards) {
6220     BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
6221     BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
6222     if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
6223     if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
6224 
6225     if (src_elem == dest_elem && top_src->is_flat() == top_dest->is_flat() && src_elem == T_OBJECT) {
6226       // If both arrays are object arrays then having the exact types
6227       // for both will remove the need for a subtype check at runtime
6228       // before the call and may make it possible to pick a faster copy
6229       // routine (without a subtype check on every element)
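           // For instance (illustrative): if profiling shows both src and dest are exactly String[],
           // the copy can be proven element-compatible statically and skip the per-element check.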
6230       // Do we have the exact type of src?
6231       bool could_have_src = src_spec;
6232       // Do we have the exact type of dest?
6233       bool could_have_dest = dest_spec;
6234       ciKlass* src_k = nullptr;
6235       ciKlass* dest_k = nullptr;
6236       if (!src_spec) {
6237         src_k = src_type->speculative_type_not_null();
6238         if (src_k != nullptr && src_k->is_array_klass()) {
6239           could_have_src = true;
6240         }
6241       }
6242       if (!dest_spec) {
6243         dest_k = dest_type->speculative_type_not_null();
6244         if (dest_k != nullptr && dest_k->is_array_klass()) {
6245           could_have_dest = true;
6246         }
6247       }
6248       if (could_have_src && could_have_dest) {
6249         // If we can have both exact types, emit the missing guards
6250         if (could_have_src && !src_spec) {
6251           src = maybe_cast_profiled_obj(src, src_k, true);
6252           src_type = _gvn.type(src);
6253           top_src = src_type->isa_aryptr();
6254         }
6255         if (could_have_dest && !dest_spec) {
6256           dest = maybe_cast_profiled_obj(dest, dest_k, true);
6257           dest_type = _gvn.type(dest);
6258           top_dest = dest_type->isa_aryptr();
6259         }
6260       }
6261     }
6262   }
6263 
6264   ciMethod* trap_method = method();
6265   int trap_bci = bci();
6266   if (saved_jvms_before_guards != nullptr) {
6267     trap_method = alloc->jvms()->method();
6268     trap_bci = alloc->jvms()->bci();
6269   }
6270 
6271   bool negative_length_guard_generated = false;
6272 
6273   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
6274       can_emit_guards && !src->is_top() && !dest->is_top()) {

6275     // validate arguments: enables transformation of the ArrayCopyNode
6276     validated = true;
6277 
6278     RegionNode* slow_region = new RegionNode(1);
6279     record_for_igvn(slow_region);
6280 
6281     // (1) src and dest are arrays.
6282     generate_non_array_guard(load_object_klass(src), slow_region);
6283     generate_non_array_guard(load_object_klass(dest), slow_region);
6284 
6285     // (2) src and dest arrays must have elements of the same BasicType
6286     // done at macro expansion or at Ideal transformation time
6287 
6288     // (4) src_offset must not be negative.
6289     generate_negative_guard(src_offset, slow_region);
6290 
6291     // (5) dest_offset must not be negative.
6292     generate_negative_guard(dest_offset, slow_region);
6293 
6294     // (7) src_offset + length must not exceed length of src.

6297                          slow_region);
6298 
6299     // (8) dest_offset + length must not exceed length of dest.
6300     generate_limit_guard(dest_offset, length,
6301                          load_array_length(dest),
6302                          slow_region);
6303 
6304     // (6) length must not be negative.
6305     // This is also checked in generate_arraycopy() during macro expansion, but
6306     // we must check it here as well for the case where the ArrayCopyNode will
6307     // be eliminated by Escape Analysis.
6308     if (EliminateAllocations) {
6309       generate_negative_guard(length, slow_region);
6310       negative_length_guard_generated = true;
6311     }
6312 
6313     // (9) each element of an oop array must be assignable
6314     Node* dest_klass = load_object_klass(dest);
6315     if (src != dest) {
6316       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
6317       slow_region->add_req(not_subtype_ctrl);
6318     }
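         // (If src == dest, the copy stays within one array, so element assignability holds trivially.)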
6319 
6320     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
6321     const Type* toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
6322     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
6323     src_type = _gvn.type(src);
6324     top_src  = src_type->isa_aryptr();
6325 
6326     // Handle flat inline type arrays (null-free arrays are handled by the subtype check above)
6327     if (!stopped() && UseFlatArray) {
6328       // If dest is flat, src must be flat as well (guaranteed by src <: dest check). Handle flat src here.
6329       assert(top_dest == nullptr || !top_dest->is_flat() || top_src->is_flat(), "src array must be flat");
6330       if (top_src != nullptr && top_src->is_flat()) {
6331         // Src is flat, check that dest is flat as well
6332         if (top_dest != nullptr && !top_dest->is_flat()) {
6333           generate_fair_guard(flat_array_test(dest_klass, /* flat = */ false), slow_region);
6334           // Since dest is flat and src <: dest, dest must have the same type as src.
6335           top_dest = top_src->cast_to_exactness(false);
6336           assert(top_dest->is_flat(), "dest must be flat");
6337           dest = _gvn.transform(new CheckCastPPNode(control(), dest, top_dest));
6338         }
6339       } else if (top_src == nullptr || !top_src->is_not_flat()) {
6340         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
6341         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
6342         assert(top_dest == nullptr || !top_dest->is_flat(), "dest array must not be flat");
6343         generate_fair_guard(flat_array_test(src), slow_region);
6344         if (top_src != nullptr) {
6345           top_src = top_src->cast_to_not_flat();
6346           src = _gvn.transform(new CheckCastPPNode(control(), src, top_src));
6347         }
6348       }
6349     }
6350 
6351     {
6352       PreserveJVMState pjvms(this);
6353       set_control(_gvn.transform(slow_region));
6354       uncommon_trap(Deoptimization::Reason_intrinsic,
6355                     Deoptimization::Action_make_not_entrant);
6356       assert(stopped(), "Should be stopped");
6357     }
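         // Note: every failed guard funnels into slow_region and deoptimizes here with Reason_intrinsic;
         // after too many such traps, the too_many_traps() check above stops generating this guarded path.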




6358     arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
6359   }
6360 
6361   if (stopped()) {
6362     return true;
6363   }
6364 
6365   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6366                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
6367                                           // so the compiler has a chance to eliminate them: during macro expansion,
6368                                           // we have to set their control (CastPP nodes are eliminated).
6369                                           load_object_klass(src), load_object_klass(dest),
6370                                           load_array_length(src), load_array_length(dest));
6371 
6372   ac->set_arraycopy(validated);
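       // Note: 'validated' records that the guards above already proved the arguments legal, so later
       // phases can expand the ArrayCopyNode without re-checking them.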
6373 
6374   Node* n = _gvn.transform(ac);
6375   if (n == ac) {
6376     ac->connect_outputs(this);
6377   } else {