src/hotspot/share/opto/library_call.cpp

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"

  27 #include "ci/ciUtilities.inline.hpp"
  28 #include "classfile/vmIntrinsics.hpp"
  29 #include "compiler/compileBroker.hpp"
  30 #include "compiler/compileLog.hpp"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "jfr/support/jfrIntrinsics.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/klass.inline.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/c2compiler.hpp"
  39 #include "opto/castnode.hpp"
  40 #include "opto/cfgnode.hpp"
  41 #include "opto/convertnode.hpp"
  42 #include "opto/countbitsnode.hpp"
  43 #include "opto/idealKit.hpp"
  44 #include "opto/library_call.hpp"
  45 #include "opto/mathexactnode.hpp"
  46 #include "opto/mulnode.hpp"

 307   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 308   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 309   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 310   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 311   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 312 
 313   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 314 
 315   case vmIntrinsics::_vectorizedHashCode:       return inline_vectorizedHashCode();
 316 
 317   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 318   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 319   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 320   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 321 
 322   case vmIntrinsics::_compressStringC:
 323   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 324   case vmIntrinsics::_inflateStringC:
 325   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 326 


 327   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 328   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 329   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 330   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 331   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 332   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 333   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 334   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 335   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);

 336 
 337   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 338   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 339   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 340   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 341   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 342   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 343   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 344   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 345   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);

 346 
 347   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 348   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 349   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 350   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 351   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 352   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 353   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 354   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 355   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 356 
 357   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 358   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 359   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 360   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 361   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 362   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 363   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 364   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 365   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 490   case vmIntrinsics::_notifyJvmtiVThreadMount:   return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
 491                                                                                          "notifyJvmtiMount", false, false);
 492   case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
 493                                                                                          "notifyJvmtiUnmount", false, false);
 494   case vmIntrinsics::_notifyJvmtiVThreadHideFrames:     return inline_native_notify_jvmti_hide();
 495   case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
 496 #endif
 497 
 498 #ifdef JFR_HAVE_INTRINSICS
 499   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
 500   case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
 501   case vmIntrinsics::_jvm_commit:               return inline_native_jvm_commit();
 502 #endif
 503   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 504   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 505   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 506   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 507   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 508   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 509   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();

 510   case vmIntrinsics::_setMemory:                return inline_unsafe_setMemory();
 511   case vmIntrinsics::_getLength:                return inline_native_getLength();
 512   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 513   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 514   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 515   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 516   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 517   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 518   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 519 
 520   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 521   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);

 522 
 523   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 524 
 525   case vmIntrinsics::_isInstance:
 526   case vmIntrinsics::_getModifiers:
 527   case vmIntrinsics::_isInterface:
 528   case vmIntrinsics::_isArray:
 529   case vmIntrinsics::_isPrimitive:
 530   case vmIntrinsics::_isHidden:
 531   case vmIntrinsics::_getSuperclass:
 532   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 533 
 534   case vmIntrinsics::_floatToRawIntBits:
 535   case vmIntrinsics::_floatToIntBits:
 536   case vmIntrinsics::_intBitsToFloat:
 537   case vmIntrinsics::_doubleToRawLongBits:
 538   case vmIntrinsics::_doubleToLongBits:
 539   case vmIntrinsics::_longBitsToDouble:
 540   case vmIntrinsics::_floatToFloat16:
 541   case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());
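Note on the bit-conversion group above: Float.floatToIntBits and floatToRawIntBits differ only in NaN handling (the former collapses every NaN to the canonical 0x7fc00000 pattern, the latter returns the bits untouched), which is why they get separate intrinsic IDs. A small, self-contained Java illustration of the library-level behavior being intrinsified (not code from this file):

    class FpBitsDemo {
        public static void main(String[] args) {
            float qnan = Float.intBitsToFloat(0x7fc00001);               // a NaN with a non-default payload
            System.out.printf("%08x%n", Float.floatToIntBits(qnan));     // 7fc00000: NaNs are canonicalized
            System.out.printf("%08x%n", Float.floatToRawIntBits(qnan));  // raw bit pattern, no canonicalization
            System.out.println(Float.float16ToFloat((short) 0x3c00));    // 1.0 (binary16 -> binary32, JDK 20+)
            System.out.printf("%04x%n", Float.floatToFloat16(1.0f));     // 3c00
        }
    }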

2222     case vmIntrinsics::_remainderUnsigned_l: {
2223       zero_check_long(argument(2));
2224       // Compile-time detection of division by zero
2225       if (stopped()) {
2226         return true; // keep the graph constructed so far
2227       }
2228       n = new UModLNode(control(), argument(0), argument(2));
2229       break;
2230     }
2231     default:  fatal_unexpected_iid(id);  break;
2232   }
2233   set_result(_gvn.transform(n));
2234   return true;
2235 }
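The case above lowers Long.remainderUnsigned to a UModLNode once the divisor has been zero-checked; if the divisor is a known zero, the graph is already stopped at the exception path. A hedged Java-level sketch of the behavior being intrinsified:

    class UnsignedRemainderDemo {
        public static void main(String[] args) {
            // -1L reads as 2^64 - 1 when treated as unsigned, so the unsigned remainder mod 10 is 5.
            System.out.println(Long.remainderUnsigned(-1L, 10L)); // 5
            // A zero divisor throws ArithmeticException, which is what zero_check_long() above guards against.
            try {
                Long.remainderUnsigned(-1L, 0L);
            } catch (ArithmeticException expected) {
                System.out.println("/ by zero");
            }
        }
    }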
2236 
2237 //----------------------------inline_unsafe_access----------------------------
2238 
2239 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2240   // Attempt to infer a sharper value type from the offset and base type.
2241   ciKlass* sharpened_klass = nullptr;

2242 
2243   // See if it is an instance field, with an object type.
2244   if (alias_type->field() != nullptr) {
2245     if (alias_type->field()->type()->is_klass()) {
2246       sharpened_klass = alias_type->field()->type()->as_klass();

2247     }
2248   }
2249 
2250   const TypeOopPtr* result = nullptr;
2251   // See if it is a narrow oop array.
2252   if (adr_type->isa_aryptr()) {
2253     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2254       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();

2255       if (elem_type != nullptr && elem_type->is_loaded()) {
2256         // Sharpen the value type.
2257         result = elem_type;
2258       }
2259     }
2260   }
2261 
2262   // The sharpened class might be unloaded if there is no class loader
2263   // constraint in place.
2264   if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2265     // Sharpen the value type.
2266     result = TypeOopPtr::make_from_klass(sharpened_klass);



2267   }
2268   if (result != nullptr) {
2269 #ifndef PRODUCT
2270     if (C->print_intrinsics() || C->print_inlining()) {
2271       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2272       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2273     }
2274 #endif
2275   }
2276   return result;
2277 }
2278 
2279 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2280   switch (kind) {
2281       case Relaxed:
2282         return MO_UNORDERED;
2283       case Opaque:
2284         return MO_RELAXED;
2285       case Acquire:
2286         return MO_ACQUIRE;
2287       case Release:
2288         return MO_RELEASE;
2289       case Volatile:
2290         return MO_SEQ_CST;
2291       default:
2292         ShouldNotReachHere();
2293         return 0;
2294   }
2295 }
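For orientation, the five access kinds mapped above correspond to the memory-ordering modes exposed at the Java level; the VarHandle access modes below are the public analogue of the Unsafe accessor variants dispatched in the intrinsic table (a sketch for illustration, not code from this file):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    class AccessKindDemo {
        int x;

        public static void main(String[] args) throws Exception {
            VarHandle X = MethodHandles.lookup().findVarHandle(AccessKindDemo.class, "x", int.class);
            AccessKindDemo d = new AccessKindDemo();
            X.set(d, 1);                        // plain access    ~ Relaxed  -> MO_UNORDERED
            X.setOpaque(d, 2);                  // opaque access   ~ Opaque   -> MO_RELAXED
            int a = (int) X.getAcquire(d);      // acquire load    ~ Acquire  -> MO_ACQUIRE
            X.setRelease(d, 3);                 // release store   ~ Release  -> MO_RELEASE
            int v = (int) X.getVolatile(d);     // volatile access ~ Volatile -> MO_SEQ_CST
        }
    }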
2296 
2297 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2298   if (callee()->is_static())  return false;  // caller must have the capability!
2299   DecoratorSet decorators = C2_UNSAFE_ACCESS;
2300   guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2301   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2302   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2303 
2304   if (is_reference_type(type)) {
2305     decorators |= ON_UNKNOWN_OOP_REF;
2306   }
2307 
2308   if (unaligned) {
2309     decorators |= C2_UNALIGNED;
2310   }
2311 
2312 #ifndef PRODUCT
2313   {
2314     ResourceMark rm;
2315     // Check the signatures.
2316     ciSignature* sig = callee()->signature();
2317 #ifdef ASSERT
2318     if (!is_store) {
2319       // Object getReference(Object base, int/long offset), etc.
2320       BasicType rtype = sig->return_type()->basic_type();
2321       assert(rtype == type, "getter must return the expected value");
2322       assert(sig->count() == 2, "oop getter has 2 arguments");
2323       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2324       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2325     } else {
2326       // void putReference(Object base, int/long offset, Object x), etc.
2327       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2328       assert(sig->count() == 3, "oop putter has 3 arguments");
2329       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2330       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2331       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2332       assert(vtype == type, "putter must accept the expected value");
2333     }
2334 #endif // ASSERT
2335  }
2336 #endif //PRODUCT
2337 
2338   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2339 
2340   Node* receiver = argument(0);  // type: oop
2341 
2342   // Build address expression.
2343   Node* heap_base_oop = top();
2344 
2345   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2346   Node* base = argument(1);  // type: oop
2347   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2348   Node* offset = argument(2);  // type: long
2349   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2350   // to be plain byte offsets, which are also the same as those accepted
2351   // by oopDesc::field_addr.
2352   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2353          "fieldOffset must be byte-scaled");
2354   // 32-bit machines ignore the high half!
2355   offset = ConvL2X(offset);
2356 
2357   // Save state and restore on bailout
2358   uint old_sp = sp();
2359   SafePointNode* old_map = clone_map();
2360 
2361   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2362 
2363   if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2364     if (type != T_OBJECT) {
2365       decorators |= IN_NATIVE; // off-heap primitive access
2366     } else {
2367       set_map(old_map);
2368       set_sp(old_sp);
2369       return false; // off-heap oop accesses are not supported
2370     }
2371   } else {
2372     heap_base_oop = base; // on-heap or mixed access
2373   }
2374 
2375   // Can base be null? Otherwise, always on-heap access.
2376   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2377 
2378   if (!can_access_non_heap) {
2379     decorators |= IN_HEAP;
2380   }
2381 
2382   Node* val = is_store ? argument(4) : nullptr;
2383 
2384   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2385   if (adr_type == TypePtr::NULL_PTR) {
2386     set_map(old_map);
2387     set_sp(old_sp);
2388     return false; // off-heap access with zero address
2389   }
2390 
2391   // Try to categorize the address.
2392   Compile::AliasType* alias_type = C->alias_type(adr_type);
2393   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2394 
2395   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2396       alias_type->adr_type() == TypeAryPtr::RANGE) {
2397     set_map(old_map);
2398     set_sp(old_sp);
2399     return false; // not supported
2400   }
2401 
2402   bool mismatched = false;
2403   BasicType bt = alias_type->basic_type();
2404   if (bt != T_ILLEGAL) {
2405     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2406     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2407       // Alias type doesn't differentiate between byte[] and boolean[].
2408       // Use address type to get the element type.
2409       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2410     }
2411     if (is_reference_type(bt, true)) {
2412       // accessing an array field with getReference is not a mismatch
2413       bt = T_OBJECT;
2414     }
2415     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2416       // Don't intrinsify mismatched object accesses
2417       set_map(old_map);
2418       set_sp(old_sp);
2419       return false;
2420     }
2421     mismatched = (bt != type);
2422   } else if (alias_type->adr_type()->isa_oopptr()) {
2423     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2424   }
2425 
2426   destruct_map_clone(old_map);
2427   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2428 
2429   if (mismatched) {
2430     decorators |= C2_MISMATCHED;
2431   }
2432 
2433   // First guess at the value type.
2434   const Type *value_type = Type::get_const_basic_type(type);
2435 
2436   // Figure out the memory ordering.
2437   decorators |= mo_decorator_for_access_kind(kind);
2438 
2439   if (!is_store && type == T_OBJECT) {
2440     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2441     if (tjp != nullptr) {
2442       value_type = tjp;


2443     }
2444   }
2445 
2446   receiver = null_check(receiver);
2447   if (stopped()) {
2448     return true;
2449   }
2450   // Heap pointers get a null-check from the interpreter,
2451   // as a courtesy.  However, this is not guaranteed by Unsafe,
2452   // and it is not possible to fully distinguish unintended nulls
2453   // from intended ones in this API.
2454 
2455   if (!is_store) {
2456     Node* p = nullptr;
2457     // Try to constant fold a load from a constant field
2458     ciField* field = alias_type->field();
2459     if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) {
2460       // final or stable field
2461       p = make_constant_from_field(field, heap_base_oop);
2462     }
2463 
2464     if (p == nullptr) { // Could not constant fold the load
2465       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2466       // Normalize the value returned by getBoolean in the following cases
2467       if (type == T_BOOLEAN &&
2468           (mismatched ||
2469            heap_base_oop == top() ||                  // - heap_base_oop is null or
2470            (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2471                                                       //   and the unsafe access is made at a large offset
2472                                                       //   (i.e., larger than the maximum offset necessary for any
2473                                                       //   field access)
2474             ) {
2475           IdealKit ideal = IdealKit(this);
2476 #define __ ideal.
2477           IdealVariable normalized_result(ideal);
2478           __ declarations_done();
2479           __ set(normalized_result, p);
2480           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2481           __ set(normalized_result, ideal.ConI(1));
2482           ideal.end_if();
2483           final_sync(ideal);
2484           p = __ value(normalized_result);
2485 #undef __
2486       }
2487     }
2488     if (type == T_ADDRESS) {
2489       p = gvn().transform(new CastP2XNode(nullptr, p));
2490       p = ConvX2UL(p);
2491     }
2492     // The load node has the control of the preceding MemBarCPUOrder.  All
2493     // following nodes will have the control of the MemBarCPUOrder inserted at
2494     // the end of this method.  So, pushing the load onto the stack at a later
2495     // point is fine.
2496     set_result(p);
2497   } else {
2498     if (bt == T_ADDRESS) {
2499       // Repackage the long as a pointer.
2500       val = ConvL2X(val);
2501       val = gvn().transform(new CastX2PNode(val));
2502     }
2503     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2504   }
2505 
2506   return true;
2507 }
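The base/offset handling above distinguishes on-heap accesses (non-null base object plus a field-offset cookie, decorated IN_HEAP) from off-heap ones (null base plus an absolute address, decorated IN_NATIVE, and rejected for oops). A minimal sketch of the two shapes as seen from Java, using sun.misc.Unsafe, which forwards to the jdk.internal.misc.Unsafe entry points intrinsified here (illustrative only; these APIs are deprecated and restricted in recent JDKs):

    import java.lang.reflect.Field;
    import sun.misc.Unsafe;

    class UnsafeAccessDemo {
        int value = 42;

        public static void main(String[] args) throws Exception {
            Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
            theUnsafe.setAccessible(true);
            Unsafe u = (Unsafe) theUnsafe.get(null);

            // On-heap: non-null base + objectFieldOffset cookie (a plain byte offset).
            UnsafeAccessDemo obj = new UnsafeAccessDemo();
            long off = u.objectFieldOffset(UnsafeAccessDemo.class.getDeclaredField("value"));
            System.out.println(u.getInt(obj, off));          // 42

            // Off-heap: null base / absolute address, primitive types only.
            long addr = u.allocateMemory(8);
            u.putLong(addr, 0x1234L);
            System.out.println(u.getLong(addr));             // 4660
            u.freeMemory(addr);
        }
    }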
2508 
2509 //----------------------------inline_unsafe_load_store----------------------------
2510 // This method serves a couple of different customers (depending on LoadStoreKind):
2511 //
2512 // LS_cmp_swap:
2513 //
2514 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2515 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2516 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2517 //
2518 // LS_cmp_swap_weak:
2519 //
2520 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2521 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2522 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2523 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2524 //
2525 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);
2526 //   boolean weakCompareAndSetIntPlain(     Object o, long offset, int    expected, int    x);
2527 //   boolean weakCompareAndSetIntAcquire(   Object o, long offset, int    expected, int    x);
2528 //   boolean weakCompareAndSetIntRelease(   Object o, long offset, int    expected, int    x);
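The Unsafe methods enumerated in the comment above are the backing primitives for the public VarHandle/atomic operations; a small hedged sketch of the corresponding Java-level calls (the LS_* labels refer to the LoadStoreKind cases handled further down):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    class CasDemo {
        int count;

        public static void main(String[] args) throws Exception {
            VarHandle C = MethodHandles.lookup().findVarHandle(CasDemo.class, "count", int.class);
            CasDemo d = new CasDemo();
            boolean swapped = C.compareAndSet(d, 0, 1);         // LS_cmp_swap: success/failure flag
            int witness = (int) C.compareAndExchange(d, 1, 2);  // LS_cmp_exchange: returns the value observed
            boolean weak = C.weakCompareAndSetPlain(d, 2, 3);   // LS_cmp_swap_weak: may fail spuriously
            int prev = (int) C.getAndAdd(d, 5);                 // LS_get_add: fetch-and-add
            System.out.println(swapped + " " + witness + " " + weak + " " + prev);
        }
    }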

2694     }
2695     case LS_cmp_swap:
2696     case LS_cmp_swap_weak:
2697     case LS_get_add:
2698       break;
2699     default:
2700       ShouldNotReachHere();
2701   }
2702 
2703   // Null check receiver.
2704   receiver = null_check(receiver);
2705   if (stopped()) {
2706     return true;
2707   }
2708 
2709   int alias_idx = C->get_alias_index(adr_type);
2710 
2711   if (is_reference_type(type)) {
2712     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2713 
2714     // Transformation of a value which could be a null pointer (CastPP #null)
2715     // could be delayed during Parse (for example, in adjust_map_after_if()).
2716     // Execute transformation here to avoid barrier generation in such case.
2717     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2718       newval = _gvn.makecon(TypePtr::NULL_PTR);
2719 
2720     if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2721       // Refine the value to a null constant, when it is known to be null
2722       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2723     }
2724   }
2725 
2726   Node* result = nullptr;
2727   switch (kind) {
2728     case LS_cmp_exchange: {
2729       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2730                                             oldval, newval, value_type, type, decorators);
2731       break;
2732     }
2733     case LS_cmp_swap_weak:

2880                     Deoptimization::Action_make_not_entrant);
2881     }
2882     if (stopped()) {
2883       return true;
2884     }
2885 #endif //INCLUDE_JVMTI
2886 
2887   Node* test = nullptr;
2888   if (LibraryCallKit::klass_needs_init_guard(kls)) {
2889     // Note:  The argument might still be an illegal value like
2890     // Serializable.class or Object[].class.   The runtime will handle it.
2891     // But we must make an explicit check for initialization.
2892     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2893     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2894     // can generate code to load it as unsigned byte.
2895     Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
2896     Node* bits = intcon(InstanceKlass::fully_initialized);
2897     test = _gvn.transform(new SubINode(inst, bits));
2898     // The 'test' is non-zero if we need to take a slow path.
2899   }
2900 
2901   Node* obj = new_instance(kls, test);
2902   set_result(obj);
2903   return true;
2904 }
2905 
2906 //------------------------inline_native_time_funcs--------------
2907 // inline code for System.currentTimeMillis() and System.nanoTime()
2908 // these have the same type and signature
2909 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2910   const TypeFunc* tf = OptoRuntime::void_long_Type();
2911   const TypePtr* no_memory_effects = nullptr;
2912   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2913   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2914 #ifdef ASSERT
2915   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2916   assert(value_top == top(), "second value must be top");
2917 #endif
2918   set_result(value);
2919   return true;
2920 }
2921 

3657 
3658 //------------------------inline_native_setCurrentThread------------------
3659 bool LibraryCallKit::inline_native_setCurrentThread() {
3660   assert(C->method()->changes_current_thread(),
3661          "method changes current Thread but is not annotated ChangesCurrentThread");
3662   Node* arr = argument(1);
3663   Node* thread = _gvn.transform(new ThreadLocalNode());
3664   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3665   Node* thread_obj_handle
3666     = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3667   thread_obj_handle = _gvn.transform(thread_obj_handle);
3668   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3669   access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3670   JFR_ONLY(extend_setCurrentThread(thread, arr);)
3671   return true;
3672 }
3673 
3674 const Type* LibraryCallKit::scopedValueCache_type() {
3675   ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3676   const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3677   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3678 
3679   // Because we create the scopedValue cache lazily we have to make the
3680   // type of the result BotPTR.
3681   bool xk = etype->klass_is_exact();
3682   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3683   return objects_type;
3684 }
3685 
3686 Node* LibraryCallKit::scopedValueCache_helper() {
3687   Node* thread = _gvn.transform(new ThreadLocalNode());
3688   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3689   // We cannot use immutable_memory() because we might flip onto a
3690   // different carrier thread, at which point we'll need to use that
3691   // carrier thread's cache.
3692   // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3693   //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3694   return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3695 }
3696 
3697 //------------------------inline_native_scopedValueCache------------------
3698 bool LibraryCallKit::inline_native_scopedValueCache() {
3699   Node* cache_obj_handle = scopedValueCache_helper();
3700   const Type* objects_type = scopedValueCache_type();
3701   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3702 
3703   return true;
3704 }
3705 
3706 //------------------------inline_native_setScopedValueCache------------------
3707 bool LibraryCallKit::inline_native_setScopedValueCache() {
3708   Node* arr = argument(0);
3709   Node* cache_obj_handle = scopedValueCache_helper();
3710   const Type* objects_type = scopedValueCache_type();
3711 
3712   const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3713   access_store_at(nullptr, cache_obj_handle, adr_type, arr, objects_type, T_OBJECT, IN_NATIVE | MO_UNORDERED);
3714 
3715   return true;
3716 }
3717 
3718 //---------------------------load_mirror_from_klass----------------------------
3719 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3720 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3721   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3722   Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3723   // mirror = ((OopHandle)mirror)->resolve();
3724   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3725 }
3726 
3727 //-----------------------load_klass_from_mirror_common-------------------------
3728 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3729 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3730 // and branch to the given path on the region.
3731 // If never_see_null, take an uncommon trap on null, so we can optimistically
3732 // compile for the non-null case.
3733 // If the region is null, force never_see_null = true.
3734 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3735                                                     bool never_see_null,
3736                                                     RegionNode* region,
3737                                                     int null_path,
3738                                                     int offset) {
3739   if (region == nullptr)  never_see_null = true;
3740   Node* p = basic_plus_adr(mirror, offset);
3741   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3742   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3743   Node* null_ctl = top();
3744   kls = null_check_oop(kls, &null_ctl, never_see_null);
3745   if (region != nullptr) {
3746     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

3749     assert(null_ctl == top(), "no loose ends");
3750   }
3751   return kls;
3752 }
3753 
3754 //--------------------(inline_native_Class_query helpers)---------------------
3755 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3756 // Fall through if (mods & mask) == bits, take the guard otherwise.
3757 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3758   // Branch around if the given klass has the given modifier bit set.
3759   // Like generate_guard, adds a new path onto the region.
3760   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3761   Node* mods = make_load(nullptr, modp, TypeInt::INT, T_INT, MemNode::unordered);
3762   Node* mask = intcon(modifier_mask);
3763   Node* bits = intcon(modifier_bits);
3764   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3765   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3766   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3767   return generate_fair_guard(bol, region);
3768 }

3769 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3770   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3771 }
3772 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3773   return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3774 }
3775 
3776 //-------------------------inline_native_Class_query-------------------
3777 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3778   const Type* return_type = TypeInt::BOOL;
3779   Node* prim_return_value = top();  // what happens if it's a primitive class?
3780   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3781   bool expect_prim = false;     // most of these guys expect to work on refs
3782 
3783   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3784 
3785   Node* mirror = argument(0);
3786   Node* obj    = top();
3787 
3788   switch (id) {

3942 
3943   case vmIntrinsics::_getClassAccessFlags:
3944     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3945     query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
3946     break;
3947 
3948   default:
3949     fatal_unexpected_iid(id);
3950     break;
3951   }
3952 
3953   // Fall-through is the normal case of a query to a real class.
3954   phi->init_req(1, query_value);
3955   region->init_req(1, control());
3956 
3957   C->set_has_split_ifs(true); // Has chance for split-if optimization
3958   set_result(region, phi);
3959   return true;
3960 }
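Each query above has to answer sensibly for primitive mirrors too, which is what prim_return_value and the _prim_path handle. A short Java reminder of the library-level answers (standard java.lang.Class behavior, not taken from this file):

    class ClassQueryDemo {
        public static void main(String[] args) {
            System.out.println(int.class.isPrimitive());       // true  (primitive-mirror path)
            System.out.println(int.class.isArray());           // false
            System.out.println(int.class.getSuperclass());     // null: primitives have no superclass
            System.out.println(Runnable.class.isInterface());  // true
            System.out.println(int[].class.isArray());         // true
        }
    }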
3961 

3962 //-------------------------inline_Class_cast-------------------
3963 bool LibraryCallKit::inline_Class_cast() {
3964   Node* mirror = argument(0); // Class
3965   Node* obj    = argument(1);
3966   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3967   if (mirror_con == nullptr) {
3968     return false;  // dead path (mirror->is_top()).
3969   }
3970   if (obj == nullptr || obj->is_top()) {
3971     return false;  // dead path
3972   }
3973   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
3974 
3975   // First, see if Class.cast() can be folded statically.
3976   // java_mirror_type() returns non-null for compile-time Class constants.
3977   ciType* tm = mirror_con->java_mirror_type();

3978   if (tm != nullptr && tm->is_klass() &&
3979       tp != nullptr) {
3980     if (!tp->is_loaded()) {
3981       // Don't use intrinsic when class is not loaded.
3982       return false;
3983     } else {
3984       int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());
3985       if (static_res == Compile::SSC_always_true) {
3986         // isInstance() is true - fold the code.
3987         set_result(obj);
3988         return true;
3989       } else if (static_res == Compile::SSC_always_false) {
3990         // Don't use intrinsic, have to throw ClassCastException.
3991         // If the reference is null, the non-intrinsic bytecode will
3992         // be optimized appropriately.
3993         return false;
3994       }
3995     }
3996   }
3997 
3998   // Bailout intrinsic and do normal inlining if exception path is frequent.
3999   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4000     return false;
4001   }
4002 
4003   // Generate dynamic checks.
4004   // Class.cast() is java implementation of _checkcast bytecode.
4005   // Do checkcast (Parse::do_checkcast()) optimizations here.
4006 
4007   mirror = null_check(mirror);
4008   // If mirror is dead, only null-path is taken.
4009   if (stopped()) {
4010     return true;
4011   }
4012 
4013   // Not-subtype or the mirror's klass ptr is null (in case it is a primitive).
4014   enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
4015   RegionNode* region = new RegionNode(PATH_LIMIT);
4016   record_for_igvn(region);
4017 
4018   // Now load the mirror's klass metaobject, and null-check it.
4019   // If kls is null, we have a primitive mirror and
4020   // nothing is an instance of a primitive type.
4021   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4022 
4023   Node* res = top();


4024   if (!stopped()) {

4025     Node* bad_type_ctrl = top();
4026     // Do checkcast optimizations.
4027     res = gen_checkcast(obj, kls, &bad_type_ctrl);
4028     region->init_req(_bad_type_path, bad_type_ctrl);
4029   }
4030   if (region->in(_prim_path) != top() ||
4031       region->in(_bad_type_path) != top()) {

4032     // Let Interpreter throw ClassCastException.
4033     PreserveJVMState pjvms(this);
4034     set_control(_gvn.transform(region));



4035     uncommon_trap(Deoptimization::Reason_intrinsic,
4036                   Deoptimization::Action_maybe_recompile);
4037   }
4038   if (!stopped()) {
4039     set_result(res);
4040   }
4041   return true;
4042 }
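At the Java level the semantics being intrinsified are those of Class.cast: null passes through, a statically provable subtype can be folded to the argument itself (the SSC_always_true case above), and a statically impossible cast is left to the normal path so it throws ClassCastException. A brief sketch:

    class ClassCastDemo {
        public static void main(String[] args) {
            Object o = "hello";
            String s = String.class.cast(o);        // provable subtype: candidate for folding
            Object n = String.class.cast(null);     // null is accepted and returned as null
            try {
                Integer i = Integer.class.cast(o);  // impossible cast: falls back and throws
            } catch (ClassCastException expected) {
                System.out.println("CCE as expected");
            }
        }
    }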
4043 
4044 
4045 //--------------------------inline_native_subtype_check------------------------
4046 // This intrinsic takes the JNI calls out of the heart of
4047 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4048 bool LibraryCallKit::inline_native_subtype_check() {
4049   // Pull both arguments off the stack.
4050   Node* args[2];                // two java.lang.Class mirrors: superc, subc
4051   args[0] = argument(0);
4052   args[1] = argument(1);
4053   Node* klasses[2];             // corresponding Klasses: superk, subk
4054   klasses[0] = klasses[1] = top();
4055 
4056   enum {
4057     // A full decision tree on {superc is prim, subc is prim}:
4058     _prim_0_path = 1,           // {P,N} => false
4059                                 // {P,P} & superc!=subc => false
4060     _prim_same_path,            // {P,P} & superc==subc => true
4061     _prim_1_path,               // {N,P} => false
4062     _ref_subtype_path,          // {N,N} & subtype check wins => true
4063     _both_ref_path,             // {N,N} & subtype check loses => false
4064     PATH_LIMIT
4065   };
4066 
4067   RegionNode* region = new RegionNode(PATH_LIMIT);

4068   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
4069   record_for_igvn(region);

4070 
4071   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
4072   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4073   int class_klass_offset = java_lang_Class::klass_offset();
4074 
4075   // First null-check both mirrors and load each mirror's klass metaobject.
4076   int which_arg;
4077   for (which_arg = 0; which_arg <= 1; which_arg++) {
4078     Node* arg = args[which_arg];
4079     arg = null_check(arg);
4080     if (stopped())  break;
4081     args[which_arg] = arg;
4082 
4083     Node* p = basic_plus_adr(arg, class_klass_offset);
4084     Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
4085     klasses[which_arg] = _gvn.transform(kls);
4086   }
4087 
4088   // Having loaded both klasses, test each for null.
4089   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4090   for (which_arg = 0; which_arg <= 1; which_arg++) {
4091     Node* kls = klasses[which_arg];
4092     Node* null_ctl = top();
4093     kls = null_check_oop(kls, &null_ctl, never_see_null);
4094     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
4095     region->init_req(prim_path, null_ctl);



4096     if (stopped())  break;
4097     klasses[which_arg] = kls;
4098   }
4099 
4100   if (!stopped()) {
4101     // now we have two reference types, in klasses[0..1]
4102     Node* subk   = klasses[1];  // the argument to isAssignableFrom
4103     Node* superk = klasses[0];  // the receiver
4104     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4105     // now we have a successful reference subtype check
4106     region->set_req(_ref_subtype_path, control());
4107   }
4108 
4109   // If both operands are primitive (both klasses null), then
4110   // we must return true when they are identical primitives.
4111   // It is convenient to test this after the first null klass check.
4112   set_control(region->in(_prim_0_path)); // go back to first null check

4113   if (!stopped()) {
4114     // Since superc is primitive, make a guard for the superc==subc case.
4115     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4116     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4117     generate_guard(bol_eq, region, PROB_FAIR);
4118     if (region->req() == PATH_LIMIT+1) {
4119       // A guard was added.  If the added guard is taken, superc==subc.
4120       region->swap_edges(PATH_LIMIT, _prim_same_path);
4121       region->del_req(PATH_LIMIT);
4122     }
4123     region->set_req(_prim_0_path, control()); // Not equal after all.
4124   }
4125 
4126   // these are the only paths that produce 'true':
4127   phi->set_req(_prim_same_path,   intcon(1));
4128   phi->set_req(_ref_subtype_path, intcon(1));
4129 
4130   // pull together the cases:
4131   assert(region->req() == PATH_LIMIT, "sane region");
4132   for (uint i = 1; i < region->req(); i++) {
4133     Node* ctl = region->in(i);
4134     if (ctl == nullptr || ctl == top()) {
4135       region->set_req(i, top());
4136       phi   ->set_req(i, top());
4137     } else if (phi->in(i) == nullptr) {
4138       phi->set_req(i, intcon(0)); // all other paths produce 'false'
4139     }
4140   }
4141 
4142   set_control(_gvn.transform(region));
4143   set_result(_gvn.transform(phi));
4144   return true;
4145 }
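The decision tree enumerated at the top of this function matches the observable behavior of Class.isAssignableFrom; a compact Java check of each path (library behavior only, shown for orientation):

    class SubtypeCheckDemo {
        public static void main(String[] args) {
            System.out.println(Number.class.isAssignableFrom(Integer.class)); // true : {N,N}, subtype check wins
            System.out.println(Integer.class.isAssignableFrom(Number.class)); // false: {N,N}, subtype check loses
            System.out.println(int.class.isAssignableFrom(int.class));        // true : {P,P} with superc == subc
            System.out.println(int.class.isAssignableFrom(long.class));       // false: {P,P}, different primitives
            System.out.println(Object.class.isAssignableFrom(int.class));     // false: {N,P}
            System.out.println(int.class.isAssignableFrom(Integer.class));    // false: {P,N}
        }
    }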
4146 
4147 //---------------------generate_array_guard_common------------------------
4148 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
4149                                                   bool obj_array, bool not_array) {
4150 
4151   if (stopped()) {
4152     return nullptr;
4153   }
4154 
4155   // If obj_array/not_array==false/false:
4156   // Branch around if the given klass is in fact an array (either obj or prim).
4157   // If obj_array/not_array==false/true:
4158   // Branch around if the given klass is not an array klass of any kind.
4159   // If obj_array/not_array==true/true:
4160   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
4161   // If obj_array/not_array==true/false:
4162   // Branch around if the kls is an oop array (Object[] or subtype)
4163   //
4164   // Like generate_guard, adds a new path onto the region.
4165   jint  layout_con = 0;
4166   Node* layout_val = get_layout_helper(kls, layout_con);
4167   if (layout_val == nullptr) {
4168     bool query = (obj_array
4169                   ? Klass::layout_helper_is_objArray(layout_con)
4170                   : Klass::layout_helper_is_array(layout_con));
4171     if (query == not_array) {
4172       return nullptr;                       // never a branch
4173     } else {                             // always a branch
4174       Node* always_branch = control();
4175       if (region != nullptr)
4176         region->add_req(always_branch);
4177       set_control(top());
4178       return always_branch;
4179     }
4180   }
4181   // Now test the correct condition.
4182   jint  nval = (obj_array
4183                 ? (jint)(Klass::_lh_array_tag_type_value
4184                    <<    Klass::_lh_array_tag_shift)
4185                 : Klass::_lh_neutral_value);
4186   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4187   BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
4188   // invert the test if we are looking for a non-array
4189   if (not_array)  btest = BoolTest(btest).negate();
4190   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4191   return generate_fair_guard(bol, region);
4192 }
4193 
4194 
4195 //-----------------------inline_native_newArray--------------------------
4196 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4197 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4198 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4199   Node* mirror;
4200   Node* count_val;
4201   if (uninitialized) {
4202     null_check_receiver();
4203     mirror    = argument(1);
4204     count_val = argument(2);
4205   } else {
4206     mirror    = argument(0);
4207     count_val = argument(1);
4208   }
4209 
4210   mirror = null_check(mirror);
4211   // If mirror or obj is dead, only null-path is taken.
4212   if (stopped())  return true;
4213 
4214   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4215   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4216   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);

4322   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4323   { PreserveReexecuteState preexecs(this);
4324     jvms()->set_should_reexecute(true);
4325 
4326     array_type_mirror = null_check(array_type_mirror);
4327     original          = null_check(original);
4328 
4329     // Check if a null path was taken unconditionally.
4330     if (stopped())  return true;
4331 
4332     Node* orig_length = load_array_length(original);
4333 
4334     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4335     klass_node = null_check(klass_node);
4336 
4337     RegionNode* bailout = new RegionNode(1);
4338     record_for_igvn(bailout);
4339 
4340     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4341     // Bail out if that is so.
4342     Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
4343     if (not_objArray != nullptr) {
4344       // Improve the klass node's type from the new optimistic assumption:
4345       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4346       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
4347       Node* cast = new CastPPNode(control(), klass_node, akls);
4348       klass_node = _gvn.transform(cast);
4349     }
4350 
4351     // Bail out if either start or end is negative.
4352     generate_negative_guard(start, bailout, &start);
4353     generate_negative_guard(end,   bailout, &end);
4354 
4355     Node* length = end;
4356     if (_gvn.type(start) != TypeInt::ZERO) {
4357       length = _gvn.transform(new SubINode(end, start));
4358     }
4359 
4360     // Bail out if length is negative (i.e., if start > end).
4361     // Without this, new_array would throw
4362     // NegativeArraySizeException, but IllegalArgumentException is what
4363     // should be thrown.
4364     generate_negative_guard(length, bailout, &length);
4365 
4366     // Bail out if start is larger than the original length
4367     Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4368     generate_negative_guard(orig_tail, bailout, &orig_tail);
4369 
4370     if (bailout->req() > 1) {
4371       PreserveJVMState pjvms(this);
4372       set_control(_gvn.transform(bailout));
4373       uncommon_trap(Deoptimization::Reason_intrinsic,
4374                     Deoptimization::Action_maybe_recompile);
4375     }
4376 
4377     if (!stopped()) {
4378       // How many elements will we copy from the original?
4379       // The answer is MinI(orig_tail, length).
4380       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4381 
4382       // Generate a direct call to the right arraycopy function(s).
4383       // We know the copy is disjoint but we might not know if the
4384       // oop stores need checking.
4385       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).

4391       // to the copyOf to be validated, including that the copy to the
4392       // new array won't trigger an ArrayStoreException. That subtype
4393       // check can be optimized if we know something on the type of
4394       // the input array from type speculation.
4395       if (_gvn.type(klass_node)->singleton()) {
4396         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4397         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4398 
4399         int test = C->static_subtype_check(superk, subk);
4400         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4401           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4402           if (t_original->speculative_type() != nullptr) {
4403             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4404           }
4405         }
4406       }
4407 
4408       bool validated = false;
4409       // Reason_class_check rather than Reason_intrinsic because we
4410       // want to intrinsify even if this traps.
4411       if (!too_many_traps(Deoptimization::Reason_class_check)) {
4412         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4413 
4414         if (not_subtype_ctrl != top()) {
4415           PreserveJVMState pjvms(this);
4416           set_control(not_subtype_ctrl);
4417           uncommon_trap(Deoptimization::Reason_class_check,
4418                         Deoptimization::Action_make_not_entrant);
4419           assert(stopped(), "Should be stopped");
4420         }
4421         validated = true;
4422       }
4423 
4424       if (!stopped()) {
4425         newcopy = new_array(klass_node, length, 0);  // no arguments to push
4426 
4427         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
4428                                                 load_object_klass(original), klass_node);
4429         if (!is_copyOfRange) {
4430           ac->set_copyof(validated);
4431         } else {

4477 
4478 //-----------------------generate_method_call----------------------------
4479 // Use generate_method_call to make a slow-call to the real
4480 // method if the fast path fails.  An alternative would be to
4481 // use a stub like OptoRuntime::slow_arraycopy_Java.
4482 // This only works for expanding the current library call,
4483 // not another intrinsic.  (E.g., don't use this for making an
4484 // arraycopy call inside of the copyOf intrinsic.)
4485 CallJavaNode*
4486 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4487   // When compiling the intrinsic method itself, do not use this technique.
4488   guarantee(callee() != C->method(), "cannot make slow-call to self");
4489 
4490   ciMethod* method = callee();
4491   // ensure the JVMS we have will be correct for this call
4492   guarantee(method_id == method->intrinsic_id(), "must match");
4493 
4494   const TypeFunc* tf = TypeFunc::make(method);
4495   if (res_not_null) {
4496     assert(tf->return_type() == T_OBJECT, "");
4497     const TypeTuple* range = tf->range();
4498     const Type** fields = TypeTuple::fields(range->cnt());
4499     fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4500     const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4501     tf = TypeFunc::make(tf->domain(), new_range);
4502   }
4503   CallJavaNode* slow_call;
4504   if (is_static) {
4505     assert(!is_virtual, "");
4506     slow_call = new CallStaticJavaNode(C, tf,
4507                            SharedRuntime::get_resolve_static_call_stub(), method);
4508   } else if (is_virtual) {
4509     assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4510     int vtable_index = Method::invalid_vtable_index;
4511     if (UseInlineCaches) {
4512       // Suppress the vtable call
4513     } else {
4514       // hashCode and clone are not miranda methods,
4515       // so the vtable index is fixed.
4516       // No need to use the linkResolver to get it.
4517        vtable_index = method->vtable_index();
4518        assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4519               "bad index %d", vtable_index);
4520     }
4521     slow_call = new CallDynamicJavaNode(tf,

4538   set_edges_for_java_call(slow_call);
4539   return slow_call;
4540 }
4541 
4542 
4543 /**
4544  * Build special case code for calls to hashCode on an object. This call may
4545  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4546  * slightly different code.
4547  */
4548 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4549   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4550   assert(!(is_virtual && is_static), "either virtual, special, or static");
4551 
4552   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4553 
4554   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4555   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4556   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4557   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4558   Node* obj = nullptr;
4559   if (!is_static) {
4560     // Check for hashing null object
4561     obj = null_check_receiver();
4562     if (stopped())  return true;        // unconditionally null
4563     result_reg->init_req(_null_path, top());
4564     result_val->init_req(_null_path, top());
4565   } else {
4566     // Do a null check, and return zero if null.
4567     // System.identityHashCode(null) == 0
4568     obj = argument(0);
4569     Node* null_ctl = top();
4570     obj = null_check_oop(obj, &null_ctl);
4571     result_reg->init_req(_null_path, null_ctl);
4572     result_val->init_req(_null_path, _gvn.intcon(0));
4573   }
4574 
4575   // Unconditionally null?  Then return right away.
4576   if (stopped()) {
4577     set_control( result_reg->in(_null_path));
4578     if (!stopped())
4579       set_result(result_val->in(_null_path));
4580     return true;
4581   }
4582 
4583   // We only go to the fast case code if we pass a number of guards.  The
4584   // paths which do not pass are accumulated in the slow_region.
4585   RegionNode* slow_region = new RegionNode(1);
4586   record_for_igvn(slow_region);
4587 
4588   // If this is a virtual call, we generate a funny guard.  We pull out
4589   // the vtable entry corresponding to hashCode() from the target object.
4590   // If the target method which we are calling happens to be the native
4591   // Object hashCode() method, we pass the guard.  We do not need this
4592   // guard for non-virtual calls -- the caller is known to be the native
4593   // Object hashCode().
4594   if (is_virtual) {
4595     // After null check, get the object's klass.
4596     Node* obj_klass = load_object_klass(obj);
4597     generate_virtual_guard(obj_klass, slow_region);
4598   }
4599 
4600   // Get the header out of the object, use LoadMarkNode when available
4601   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4602   // The control of the load must be null. Otherwise, the load can move before
4603   // the null check after castPP removal.
4604   Node* no_ctrl = nullptr;
4605   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4606 
4607   // Test the header to see if it is safe to read w.r.t. locking.
4608   Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);

4609   Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4610   if (LockingMode == LM_LIGHTWEIGHT) {
4611     Node *monitor_val   = _gvn.MakeConX(markWord::monitor_value);
4612     Node *chk_monitor   = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4613     Node *test_monitor  = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4614 
4615     generate_slow_guard(test_monitor, slow_region);
4616   } else {
4617     Node *unlocked_val      = _gvn.MakeConX(markWord::unlocked_value);
4618     Node *chk_unlocked      = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
4619     Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
4620 
4621     generate_slow_guard(test_not_unlocked, slow_region);
4622   }
4623 
4624   // Get the hash value and check to see that it has been properly assigned.
4625   // We depend on hash_mask being at most 32 bits and avoid the use of
4626   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4627   // vm: see markWord.hpp.
4628   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
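       // [Editorial sketch, not part of this change] Assuming the usual markWord layout
       // (identity hash held at markWord::hash_shift under markWord::hash_mask, with
       // markWord::no_hash == 0 meaning "not assigned yet"), the fast path assembled in
       // this function is roughly
       //
       //   uintptr_t mark = obj->mark();                                 // raw header load above
       //   if (mark is locked / points to a monitor)   goto slow_region; // lock-bits test above
       //   int hash = (int)((mark >> hash_shift) & hash_mask);
       //   if (hash == no_hash)                        goto slow_region; // let the runtime install one
       //   result = hash;                                                // fast-path value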

4662     // this->control() comes from set_results_for_java_call
4663     result_reg->init_req(_slow_path, control());
4664     result_val->init_req(_slow_path, slow_result);
4665     result_io  ->set_req(_slow_path, i_o());
4666     result_mem ->set_req(_slow_path, reset_memory());
4667   }
4668 
4669   // Return the combined state.
4670   set_i_o(        _gvn.transform(result_io)  );
4671   set_all_memory( _gvn.transform(result_mem));
4672 
4673   set_result(result_reg, result_val);
4674   return true;
4675 }
4676 
4677 //---------------------------inline_native_getClass----------------------------
4678 // public final native Class<?> java.lang.Object.getClass();
4679 //
4680 // Build special case code for calls to getClass on an object.
4681 bool LibraryCallKit::inline_native_getClass() {
4682   Node* obj = null_check_receiver();









4683   if (stopped())  return true;
4684   set_result(load_mirror_from_klass(load_object_klass(obj)));
4685   return true;
4686 }
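     // [Editorial note, illustrative only] The intrinsic above compiles obj.getClass() down to two
     // dependent loads (the receiver's klass pointer, then the klass's java_mirror), guarded only
     // by the receiver null check.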
4687 
4688 //-----------------inline_native_Reflection_getCallerClass---------------------
4689 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4690 //
4691 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4692 //
4693 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4694 // in that it must skip particular security frames and checks for
4695 // caller sensitive methods.
4696 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4697 #ifndef PRODUCT
4698   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4699     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4700   }
4701 #endif
4702 

5014     dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5015 
5016     flags |= RC_NARROW_MEM; // narrow in memory
5017   }
5018 
5019   // Call it.  Note that the length argument is not scaled.
5020   make_runtime_call(flags,
5021                     OptoRuntime::make_setmemory_Type(),
5022                     StubRoutines::unsafe_setmemory(),
5023                     "unsafe_setmemory",
5024                     dst_type,
5025                     dst_addr, size XTOP, byte);
5026 
5027   store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
5028 
5029   return true;
5030 }
5031 
5032 #undef XTOP
5033 














5034 //------------------------copy_to_clone----------------------------------
5035 // Helper function for inline_native_clone.
5036 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5037   assert(obj_size != nullptr, "");
5038   Node* raw_obj = alloc_obj->in(1);
5039   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5040 
5041   AllocateNode* alloc = nullptr;
5042   if (ReduceBulkZeroing &&
5043       // If we are implementing an array clone without knowing its source type
5044       // (can happen when compiling the array-guarded branch of a reflective
5045       // Object.clone() invocation), initialize the array within the allocation.
5046       // This is needed because some GCs (e.g. ZGC) might fall back in this case
5047       // to a runtime clone call that assumes fully initialized source arrays.
5048       (!is_array || obj->get_ptr_type()->isa_aryptr() != nullptr)) {
5049     // We will be completely responsible for initializing this object -
5050     // mark Initialize node as complete.
5051     alloc = AllocateNode::Ideal_allocation(alloc_obj);
5052     // The object was just allocated - there should not be any stores!
5053     guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");

5084 //  not cloneable or finalizer => slow path to out-of-line Object.clone
5085 //
5086 // The general case has two steps, allocation and copying.
5087 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5088 //
5089 // Copying also has two cases, oop arrays and everything else.
5090 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5091 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5092 //
5093 // These steps fold up nicely if and when the cloned object's klass
5094 // can be sharply typed as an object array, a type array, or an instance.
5095 //
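     // [Editorial sketch, not part of this change] Hedged summary of the dispatch built below:
     //
     //   if (receiver is an array) {
     //     copy = new_array(klass, length);
     //     if (oop array and the GC needs barriers)  -> ArrayCopyNode clone (oop-array variant)
     //     else                                      -> copy_to_clone() tight copy loop
     //   } else if (the virtual / cloneable-fast / no-finalizer guards all pass) {
     //     copy = new_instance(klass);               -> copy_to_clone() tight copy loop
     //   } else {
     //     out-of-line call to Object.clone()        // slow path
     //   }
     //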
5096 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5097   PhiNode* result_val;
5098 
5099   // Set the reexecute bit for the interpreter to reexecute
5100   // the bytecode that invokes Object.clone if deoptimization happens.
5101   { PreserveReexecuteState preexecs(this);
5102     jvms()->set_should_reexecute(true);
5103 
5104     Node* obj = null_check_receiver();

5105     if (stopped())  return true;
5106 
5107     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();






5108 
5109     // If we are going to clone an instance, we need its exact type to
5110     // know the number and types of fields to convert the clone to
5111     // loads/stores. Maybe a speculative type can help us.
5112     if (!obj_type->klass_is_exact() &&
5113         obj_type->speculative_type() != nullptr &&
5114         obj_type->speculative_type()->is_instance_klass()) {

5115       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5116       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5117           !spec_ik->has_injected_fields()) {
5118         if (!obj_type->isa_instptr() ||
5119             obj_type->is_instptr()->instance_klass()->has_subklass()) {
5120           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5121         }
5122       }
5123     }
5124 
5125     // Conservatively insert a memory barrier on all memory slices.
5126     // Do not let writes into the original float below the clone.
5127     insert_mem_bar(Op_MemBarCPUOrder);
5128 
5129     // paths into result_reg:
5130     enum {
5131       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
5132       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
5133       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
5134       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
5135       PATH_LIMIT
5136     };
5137     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5138     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5139     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
5140     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5141     record_for_igvn(result_reg);
5142 
5143     Node* obj_klass = load_object_klass(obj);





5144     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
5145     if (array_ctl != nullptr) {
5146       // It's an array.
5147       PreserveJVMState pjvms(this);
5148       set_control(array_ctl);
5149       Node* obj_length = load_array_length(obj);
5150       Node* array_size = nullptr; // Size of the array without object alignment padding.
5151       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5152 
5153       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5154       if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5155         // If it is an oop array, it requires very special treatment,
5156         // because gc barriers are required when accessing the array.
5157         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5158         if (is_obja != nullptr) {
5159           PreserveJVMState pjvms2(this);
5160           set_control(is_obja);
5161           // Generate a direct call to the right arraycopy function(s).
5162           // Clones are always tightly coupled.
5163           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5164           ac->set_clone_oop_array();
5165           Node* n = _gvn.transform(ac);
5166           assert(n == ac, "cannot disappear");
5167           ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5168 
5169           result_reg->init_req(_objArray_path, control());
5170           result_val->init_req(_objArray_path, alloc_obj);
5171           result_i_o ->set_req(_objArray_path, i_o());
5172           result_mem ->set_req(_objArray_path, reset_memory());
5173         }
5174       }
5175       // Otherwise, there are no barriers to worry about.
5176       // (We can dispense with card marks if we know the allocation
5177       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
5178       //  causes the non-eden paths to take compensating steps to
5179       //  simulate a fresh allocation, so that no further
5180       //  card marks are required in compiled code to initialize
5181       //  the object.)
5182 
5183       if (!stopped()) {
5184         copy_to_clone(obj, alloc_obj, array_size, true);
5185 
5186         // Present the results of the copy.
5187         result_reg->init_req(_array_path, control());
5188         result_val->init_req(_array_path, alloc_obj);
5189         result_i_o ->set_req(_array_path, i_o());
5190         result_mem ->set_req(_array_path, reset_memory());




































5191       }
5192     }
5193 
5194     // We only go to the instance fast case code if we pass a number of guards.
5195     // The paths which do not pass are accumulated in the slow_region.
5196     RegionNode* slow_region = new RegionNode(1);
5197     record_for_igvn(slow_region);
5198     if (!stopped()) {
5199       // It's an instance (we did array above).  Make the slow-path tests.
5200       // If this is a virtual call, we generate a funny guard.  We grab
5201       // the vtable entry corresponding to clone() from the target object.
5202       // If the target method which we are calling happens to be the
5203       // Object clone() method, we pass the guard.  We do not need this
5204       // guard for non-virtual calls; the caller is known to be the native
5205       // Object clone().
5206       if (is_virtual) {
5207         generate_virtual_guard(obj_klass, slow_region);
5208       }
5209 
5210       // The object must be easily cloneable and must not have a finalizer.
5211       // Both of these conditions may be checked in a single test.
5212       // We could optimize the test further, but we don't care.
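           // [Editorial note, illustrative only] With mask = JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER
           // and bits = JVM_ACC_IS_CLONEABLE_FAST, the guard below effectively tests
           //   (klass->access_flags() & mask) == bits
           // i.e. cloneable-fast is set and has-finalizer is clear; anything else is routed to slow_region.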
5213       generate_access_flags_guard(obj_klass,
5214                                   // Test both conditions:
5215                                   JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
5216                                   // Must be cloneable but not finalizer:
5217                                   JVM_ACC_IS_CLONEABLE_FAST,

5309         set_jvms(sfpt->jvms());
5310         _reexecute_sp = jvms()->sp();
5311 
5312         return saved_jvms;
5313       }
5314     }
5315   }
5316   return nullptr;
5317 }
5318 
5319 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5320 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5321 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5322   JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5323   uint size = alloc->req();
5324   SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5325   old_jvms->set_map(sfpt);
5326   for (uint i = 0; i < size; i++) {
5327     sfpt->init_req(i, alloc->in(i));
5328   }












5329   // re-push array length for deoptimization
5330   sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
5331   old_jvms->set_sp(old_jvms->sp()+1);
5332   old_jvms->set_monoff(old_jvms->monoff()+1);
5333   old_jvms->set_scloff(old_jvms->scloff()+1);
5334   old_jvms->set_endoff(old_jvms->endoff()+1);
5335   old_jvms->set_should_reexecute(true);
5336 
5337   sfpt->set_i_o(map()->i_o());
5338   sfpt->set_memory(map()->memory());
5339   sfpt->set_control(map()->control());
5340   return sfpt;
5341 }
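     // [Editorial sketch, not part of this change] Effect of the re-push above on the cloned JVMState,
     // assuming the expression stack held k elements at the allocation:
     //
     //   before:  locals | stack[0..k)          | monitors | scalar objects
     //   after:   locals | stack[0..k), ALength | monitors | scalar objects
     //
     // which is why sp, monoff, scloff and endoff are each bumped by one, and why should_reexecute
     // is set so the interpreter re-runs the allocation bytecode after a deoptimization.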
5342 
5343 // In case of a deoptimization, we restart execution at the
5344 // allocation, allocating a new array. We would leave an uninitialized
5345 // array in the heap that GCs wouldn't expect. Move the allocation
5346 // after the traps so we don't allocate the array if we
5347 // deoptimize. This is possible because tightly_coupled_allocation()
5348 // guarantees there's no observer of the allocated array at this point
5349 // and the control flow is simple enough.
5350 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5351                                                     int saved_reexecute_sp, uint new_idx) {
5352   if (saved_jvms_before_guards != nullptr && !stopped()) {
5353     replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5354 
5355     assert(alloc != nullptr, "only with a tightly coupled allocation");
5356     // restore JVM state to the state at the arraycopy
5357     saved_jvms_before_guards->map()->set_control(map()->control());
5358     assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5359     assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5360     // If we've improved the types of some nodes (null check) while
5361     // emitting the guards, propagate them to the current state
5362     map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5363     set_jvms(saved_jvms_before_guards);
5364     _reexecute_sp = saved_reexecute_sp;
5365 
5366     // Remove the allocation from above the guards
5367     CallProjections callprojs;
5368     alloc->extract_projections(&callprojs, true);
5369     InitializeNode* init = alloc->initialization();
5370     Node* alloc_mem = alloc->in(TypeFunc::Memory);
5371     C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5372     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5373 
5374     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5375     // the allocation (i.e. is only valid if the allocation succeeds):
5376     // 1) replace CastIINode with AllocateArrayNode's length here
5377     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5378     //
5379     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
5380     // a new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy).
5381     Node* init_control = init->proj_out(TypeFunc::Control);
5382     Node* alloc_length = alloc->Ideal_length();
5383 #ifdef ASSERT
5384     Node* prev_cast = nullptr;
5385 #endif
5386     for (uint i = 0; i < init_control->outcnt(); i++) {
5387       Node* init_out = init_control->raw_out(i);
5388       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5389 #ifdef ASSERT
5390         if (prev_cast == nullptr) {
5391           prev_cast = init_out;

5393           if (prev_cast->cmp(*init_out) == false) {
5394             prev_cast->dump();
5395             init_out->dump();
5396             assert(false, "not equal CastIINode");
5397           }
5398         }
5399 #endif
5400         C->gvn_replace_by(init_out, alloc_length);
5401       }
5402     }
5403     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5404 
5405     // move the allocation here (after the guards)
5406     _gvn.hash_delete(alloc);
5407     alloc->set_req(TypeFunc::Control, control());
5408     alloc->set_req(TypeFunc::I_O, i_o());
5409     Node *mem = reset_memory();
5410     set_all_memory(mem);
5411     alloc->set_req(TypeFunc::Memory, mem);
5412     set_control(init->proj_out_or_null(TypeFunc::Control));
5413     set_i_o(callprojs.fallthrough_ioproj);
5414 
5415     // Update memory as done in GraphKit::set_output_for_allocation()
5416     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5417     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5418     if (ary_type->isa_aryptr() && length_type != nullptr) {
5419       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5420     }
5421     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5422     int            elemidx  = C->get_alias_index(telemref);
5423     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5424     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5425 
5426     Node* allocx = _gvn.transform(alloc);
5427     assert(allocx == alloc, "where has the allocation gone?");
5428     assert(dest->is_CheckCastPP(), "not an allocation result?");
5429 
5430     _gvn.hash_delete(dest);
5431     dest->set_req(0, control());
5432     Node* destx = _gvn.transform(dest);
5433     assert(destx == dest, "where has the allocation result gone?");

5703         top_src  = src_type->isa_aryptr();
5704         has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5705         src_spec = true;
5706       }
5707       if (!has_dest) {
5708         dest = maybe_cast_profiled_obj(dest, dest_k, true);
5709         dest_type  = _gvn.type(dest);
5710         top_dest  = dest_type->isa_aryptr();
5711         has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5712         dest_spec = true;
5713       }
5714     }
5715   }
5716 
5717   if (has_src && has_dest && can_emit_guards) {
5718     BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5719     BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5720     if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5721     if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5722 
5723     if (src_elem == dest_elem && src_elem == T_OBJECT) {
5724       // If both arrays are object arrays then having the exact types
5725       // for both will remove the need for a subtype check at runtime
5726       // before the call and may make it possible to pick a faster copy
5727       // routine (without a subtype check on every element)
5728       // Do we have the exact type of src?
5729       bool could_have_src = src_spec;
5730       // Do we have the exact type of dest?
5731       bool could_have_dest = dest_spec;
5732       ciKlass* src_k = nullptr;
5733       ciKlass* dest_k = nullptr;
5734       if (!src_spec) {
5735         src_k = src_type->speculative_type_not_null();
5736         if (src_k != nullptr && src_k->is_array_klass()) {
5737           could_have_src = true;
5738         }
5739       }
5740       if (!dest_spec) {
5741         dest_k = dest_type->speculative_type_not_null();
5742         if (dest_k != nullptr && dest_k->is_array_klass()) {
5743           could_have_dest = true;
5744         }
5745       }
5746       if (could_have_src && could_have_dest) {
5747         // If we can have both exact types, emit the missing guards
5748         if (could_have_src && !src_spec) {
5749           src = maybe_cast_profiled_obj(src, src_k, true);


5750         }
5751         if (could_have_dest && !dest_spec) {
5752           dest = maybe_cast_profiled_obj(dest, dest_k, true);


5753         }
5754       }
5755     }
5756   }
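       // [Editorial example, illustrative only] If profiling says both src and dest are really, say,
       // String[] while their static types are only Object[], the casts inserted above hand the copy
       // exact element types, so the subtype check before the call can be dropped and a faster copy
       // routine (no per-element check) may be picked at expansion time.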
5757 
5758   ciMethod* trap_method = method();
5759   int trap_bci = bci();
5760   if (saved_jvms_before_guards != nullptr) {
5761     trap_method = alloc->jvms()->method();
5762     trap_bci = alloc->jvms()->bci();
5763   }
5764 
5765   bool negative_length_guard_generated = false;
5766 
5767   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5768       can_emit_guards &&
5769       !src->is_top() && !dest->is_top()) {
5770     // validate arguments: enables transformation of the ArrayCopyNode
5771     validated = true;
5772 
5773     RegionNode* slow_region = new RegionNode(1);
5774     record_for_igvn(slow_region);
5775 
5776     // (1) src and dest are arrays.
5777     generate_non_array_guard(load_object_klass(src), slow_region);
5778     generate_non_array_guard(load_object_klass(dest), slow_region);
5779 
5780     // (2) src and dest arrays must have elements of the same BasicType
5781     // done at macro expansion or at Ideal transformation time
5782 
5783     // (4) src_offset must not be negative.
5784     generate_negative_guard(src_offset, slow_region);
5785 
5786     // (5) dest_offset must not be negative.
5787     generate_negative_guard(dest_offset, slow_region);
5788 
5789     // (7) src_offset + length must not exceed length of src.

5792                          slow_region);
5793 
5794     // (8) dest_offset + length must not exceed length of dest.
5795     generate_limit_guard(dest_offset, length,
5796                          load_array_length(dest),
5797                          slow_region);
5798 
5799     // (6) length must not be negative.
5800     // This is also checked in generate_arraycopy() during macro expansion, but
5801     // we also have to check it here for the case where the ArrayCopyNode will
5802     // be eliminated by Escape Analysis.
5803     if (EliminateAllocations) {
5804       generate_negative_guard(length, slow_region);
5805       negative_length_guard_generated = true;
5806     }
5807 
5808     // (9) each element of an oop array must be assignable
5809     Node* dest_klass = load_object_klass(dest);
5810     if (src != dest) {
5811       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);


5812 
5813       if (not_subtype_ctrl != top()) {
5814         PreserveJVMState pjvms(this);
5815         set_control(not_subtype_ctrl);
5816         uncommon_trap(Deoptimization::Reason_intrinsic,
5817                       Deoptimization::Action_make_not_entrant);
5818         assert(stopped(), "Should be stopped");






















5819       }
5820     }

5821     {
5822       PreserveJVMState pjvms(this);
5823       set_control(_gvn.transform(slow_region));
5824       uncommon_trap(Deoptimization::Reason_intrinsic,
5825                     Deoptimization::Action_make_not_entrant);
5826       assert(stopped(), "Should be stopped");
5827     }
5828 
5829     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5830     const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
5831     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5832     arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
5833   }
5834 
5835   if (stopped()) {
5836     return true;
5837   }
5838 
5839   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
5840                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
5841                                           // so the compiler has a chance to eliminate them: during macro expansion,
5842                                           // we have to set their control (CastPP nodes are eliminated).
5843                                           load_object_klass(src), load_object_klass(dest),
5844                                           load_array_length(src), load_array_length(dest));
5845 
5846   ac->set_arraycopy(validated);
5847 
5848   Node* n = _gvn.transform(ac);
5849   if (n == ac) {
5850     ac->connect_outputs(this);
5851   } else {

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "ci/ciFlatArrayKlass.hpp"
  28 #include "ci/ciUtilities.inline.hpp"
  29 #include "classfile/vmIntrinsics.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compileLog.hpp"
  32 #include "gc/shared/barrierSet.hpp"
  33 #include "jfr/support/jfrIntrinsics.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "oops/klass.inline.hpp"
  36 #include "oops/objArrayKlass.hpp"
  37 #include "opto/addnode.hpp"
  38 #include "opto/arraycopynode.hpp"
  39 #include "opto/c2compiler.hpp"
  40 #include "opto/castnode.hpp"
  41 #include "opto/cfgnode.hpp"
  42 #include "opto/convertnode.hpp"
  43 #include "opto/countbitsnode.hpp"
  44 #include "opto/idealKit.hpp"
  45 #include "opto/library_call.hpp"
  46 #include "opto/mathexactnode.hpp"
  47 #include "opto/mulnode.hpp"

 308   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 309   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 310   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 311   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 312   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 313 
 314   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 315 
 316   case vmIntrinsics::_vectorizedHashCode:       return inline_vectorizedHashCode();
 317 
 318   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 319   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 320   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 321   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 322 
 323   case vmIntrinsics::_compressStringC:
 324   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 325   case vmIntrinsics::_inflateStringC:
 326   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 327 
 328   case vmIntrinsics::_makePrivateBuffer:        return inline_unsafe_make_private_buffer();
 329   case vmIntrinsics::_finishPrivateBuffer:      return inline_unsafe_finish_private_buffer();
 330   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 331   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 332   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 333   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 334   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 335   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 336   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 337   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 338   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);
 339   case vmIntrinsics::_getValue:                 return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false, true);
 340 
 341   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 342   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 343   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 344   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 345   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 346   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 347   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 348   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 349   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);
 350   case vmIntrinsics::_putValue:                 return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false, true);
 351 
 352   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 353   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 354   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 355   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 356   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 357   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 358   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 359   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 360   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 361 
 362   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 363   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 364   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 365   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 366   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 367   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 368   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 369   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 370   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 495   case vmIntrinsics::_notifyJvmtiVThreadMount:   return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
 496                                                                                          "notifyJvmtiMount", false, false);
 497   case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
 498                                                                                          "notifyJvmtiUnmount", false, false);
 499   case vmIntrinsics::_notifyJvmtiVThreadHideFrames:     return inline_native_notify_jvmti_hide();
 500   case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
 501 #endif
 502 
 503 #ifdef JFR_HAVE_INTRINSICS
 504   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
 505   case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
 506   case vmIntrinsics::_jvm_commit:               return inline_native_jvm_commit();
 507 #endif
 508   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 509   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 510   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 511   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 512   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 513   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 514   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 515   case vmIntrinsics::_isFlatArray:              return inline_unsafe_isFlatArray();
 516   case vmIntrinsics::_setMemory:                return inline_unsafe_setMemory();
 517   case vmIntrinsics::_getLength:                return inline_native_getLength();
 518   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 519   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 520   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 521   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 522   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 523   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 524   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 525 
 526   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 527   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 528   case vmIntrinsics::_newNullRestrictedArray:   return inline_newNullRestrictedArray();
 529 
 530   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 531 
 532   case vmIntrinsics::_isInstance:
 533   case vmIntrinsics::_getModifiers:
 534   case vmIntrinsics::_isInterface:
 535   case vmIntrinsics::_isArray:
 536   case vmIntrinsics::_isPrimitive:
 537   case vmIntrinsics::_isHidden:
 538   case vmIntrinsics::_getSuperclass:
 539   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 540 
 541   case vmIntrinsics::_floatToRawIntBits:
 542   case vmIntrinsics::_floatToIntBits:
 543   case vmIntrinsics::_intBitsToFloat:
 544   case vmIntrinsics::_doubleToRawLongBits:
 545   case vmIntrinsics::_doubleToLongBits:
 546   case vmIntrinsics::_longBitsToDouble:
 547   case vmIntrinsics::_floatToFloat16:
 548   case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());

2229     case vmIntrinsics::_remainderUnsigned_l: {
2230       zero_check_long(argument(2));
2231       // Compile-time detection of division by zero
2232       if (stopped()) {
2233         return true; // keep the graph constructed so far
2234       }
2235       n = new UModLNode(control(), argument(0), argument(2));
2236       break;
2237     }
2238     default:  fatal_unexpected_iid(id);  break;
2239   }
2240   set_result(_gvn.transform(n));
2241   return true;
2242 }
2243 
2244 //----------------------------inline_unsafe_access----------------------------
2245 
2246 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2247   // Attempt to infer a sharper value type from the offset and base type.
2248   ciKlass* sharpened_klass = nullptr;
2249   bool null_free = false;
2250 
2251   // See if it is an instance field, with an object type.
2252   if (alias_type->field() != nullptr) {
2253     if (alias_type->field()->type()->is_klass()) {
2254       sharpened_klass = alias_type->field()->type()->as_klass();
2255       null_free = alias_type->field()->is_null_free();
2256     }
2257   }
2258 
2259   const TypeOopPtr* result = nullptr;
2260   // See if it is a narrow oop array.
2261   if (adr_type->isa_aryptr()) {
2262     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2263       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2264       null_free = adr_type->is_aryptr()->is_null_free();
2265       if (elem_type != nullptr && elem_type->is_loaded()) {
2266         // Sharpen the value type.
2267         result = elem_type;
2268       }
2269     }
2270   }
2271 
2272   // The sharpened class might be unloaded if there is no class loader
2273   // constraint in place.
2274   if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2275     // Sharpen the value type.
2276     result = TypeOopPtr::make_from_klass(sharpened_klass);
2277     if (null_free) {
2278       result = result->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2279     }
2280   }
2281   if (result != nullptr) {
2282 #ifndef PRODUCT
2283     if (C->print_intrinsics() || C->print_inlining()) {
2284       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2285       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2286     }
2287 #endif
2288   }
2289   return result;
2290 }
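     // [Editorial example, illustrative only] E.g. an Unsafe read whose base type is String[] and whose
     // offset points past the array header is sharpened above from the generic T_OBJECT value type to
     // the element type java.lang.String, giving the later load a more precise type than plain Object.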
2291 
2292 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2293   switch (kind) {
2294       case Relaxed:
2295         return MO_UNORDERED;
2296       case Opaque:
2297         return MO_RELAXED;
2298       case Acquire:
2299         return MO_ACQUIRE;
2300       case Release:
2301         return MO_RELEASE;
2302       case Volatile:
2303         return MO_SEQ_CST;
2304       default:
2305         ShouldNotReachHere();
2306         return 0;
2307   }
2308 }
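     // [Editorial note, illustrative only] Per the mapping above, the Volatile accessor intrinsics
     // (getIntVolatile, putLongVolatile, ...) carry MO_SEQ_CST, while the plain Relaxed accessors
     // (getInt, putInt, ...) carry MO_UNORDERED.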
2309 
2310 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned, const bool is_flat) {
2311   if (callee()->is_static())  return false;  // caller must have the capability!
2312   DecoratorSet decorators = C2_UNSAFE_ACCESS;
2313   guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2314   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2315   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2316 
2317   if (is_reference_type(type)) {
2318     decorators |= ON_UNKNOWN_OOP_REF;
2319   }
2320 
2321   if (unaligned) {
2322     decorators |= C2_UNALIGNED;
2323   }
2324 
2325 #ifndef PRODUCT
2326   {
2327     ResourceMark rm;
2328     // Check the signatures.
2329     ciSignature* sig = callee()->signature();
2330 #ifdef ASSERT
2331     if (!is_store) {
2332       // Object getReference(Object base, int/long offset), etc.
2333       BasicType rtype = sig->return_type()->basic_type();
2334       assert(rtype == type, "getter must return the expected value");
2335       assert(sig->count() == 2 || (is_flat && sig->count() == 3), "oop getter has 2 or 3 arguments");
2336       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2337       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2338     } else {
2339       // void putReference(Object base, int/long offset, Object x), etc.
2340       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2341       assert(sig->count() == 3 || (is_flat && sig->count() == 4), "oop putter has 3 or 4 arguments");
2342       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2343       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2344       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2345       assert(vtype == type, "putter must accept the expected value");
2346     }
2347 #endif // ASSERT
2348  }
2349 #endif //PRODUCT
2350 
2351   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2352 
2353   Node* receiver = argument(0);  // type: oop
2354 
2355   // Build address expression.
2356   Node* heap_base_oop = top();
2357 
2358   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2359   Node* base = argument(1);  // type: oop
2360   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2361   Node* offset = argument(2);  // type: long
2362   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2363   // to be plain byte offsets, which are also the same as those accepted
2364   // by oopDesc::field_addr.
2365   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2366          "fieldOffset must be byte-scaled");
2367 
2368   ciInlineKlass* inline_klass = nullptr;
2369   if (is_flat) {
2370     const TypeInstPtr* cls = _gvn.type(argument(4))->isa_instptr();
2371     if (cls == nullptr || cls->const_oop() == nullptr) {
2372       return false;
2373     }
2374     ciType* mirror_type = cls->const_oop()->as_instance()->java_mirror_type();
2375     if (!mirror_type->is_inlinetype()) {
2376       return false;
2377     }
2378     inline_klass = mirror_type->as_inline_klass();
2379   }
2380 
2381   if (base->is_InlineType()) {
2382     InlineTypeNode* vt = base->as_InlineType();
2383     if (is_store) {
2384       if (!vt->is_allocated(&_gvn)) {
2385         return false;
2386       }
2387       base = vt->get_oop();
2388     } else {
2389       if (offset->is_Con()) {
2390         long off = find_long_con(offset, 0);
2391         ciInlineKlass* vk = vt->type()->inline_klass();
2392         if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2393           return false;
2394         }
2395 
2396         ciField* field = vk->get_non_flat_field_by_offset(off);
2397         if (field != nullptr) {
2398           BasicType bt = type2field[field->type()->basic_type()];
2399           if (bt == T_ARRAY || bt == T_NARROWOOP) {
2400             bt = T_OBJECT;
2401           }
2402           if (bt == type && (!field->is_flat() || field->type() == inline_klass)) {
2403             Node* value = vt->field_value_by_offset(off, false);
2404             if (value->is_InlineType()) {
2405               value = value->as_InlineType()->adjust_scalarization_depth(this);
2406             }
2407             set_result(value);
2408             return true;
2409           }
2410         }
2411       }
2412       {
2413         // Re-execute the unsafe access if allocation triggers deoptimization.
2414         PreserveReexecuteState preexecs(this);
2415         jvms()->set_should_reexecute(true);
2416         vt = vt->buffer(this);
2417       }
2418       base = vt->get_oop();
2419     }
2420   }
2421 
2422   // 32-bit machines ignore the high half!
2423   offset = ConvL2X(offset);
2424 
2425   // Save state and restore on bailout
2426   uint old_sp = sp();
2427   SafePointNode* old_map = clone_map();
2428 
2429   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2430 
2431   if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2432     if (type != T_OBJECT && (inline_klass == nullptr || !inline_klass->has_object_fields())) {
2433       decorators |= IN_NATIVE; // off-heap primitive access
2434     } else {
2435       set_map(old_map);
2436       set_sp(old_sp);
2437       return false; // off-heap oop accesses are not supported
2438     }
2439   } else {
2440     heap_base_oop = base; // on-heap or mixed access
2441   }
2442 
2443   // Can base be null? Otherwise, always on-heap access.
2444   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2445 
2446   if (!can_access_non_heap) {
2447     decorators |= IN_HEAP;
2448   }
2449 
2450   Node* val = is_store ? argument(4 + (is_flat ? 1 : 0)) : nullptr;
2451 
2452   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2453   if (adr_type == TypePtr::NULL_PTR) {
2454     set_map(old_map);
2455     set_sp(old_sp);
2456     return false; // off-heap access with zero address
2457   }
2458 
2459   // Try to categorize the address.
2460   Compile::AliasType* alias_type = C->alias_type(adr_type);
2461   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2462 
2463   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2464       alias_type->adr_type() == TypeAryPtr::RANGE) {
2465     set_map(old_map);
2466     set_sp(old_sp);
2467     return false; // not supported
2468   }
2469 
2470   bool mismatched = false;
2471   BasicType bt = T_ILLEGAL;
2472   ciField* field = nullptr;
2473   if (adr_type->isa_instptr()) {
2474     const TypeInstPtr* instptr = adr_type->is_instptr();
2475     ciInstanceKlass* k = instptr->instance_klass();
2476     int off = instptr->offset();
2477     if (instptr->const_oop() != nullptr &&
2478         k == ciEnv::current()->Class_klass() &&
2479         instptr->offset() >= (k->size_helper() * wordSize)) {
2480       k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2481       field = k->get_field_by_offset(off, true);
2482     } else {
2483       field = k->get_non_flat_field_by_offset(off);
2484     }
2485     if (field != nullptr) {
2486       bt = type2field[field->type()->basic_type()];
2487     }
2488     assert(bt == alias_type->basic_type() || is_flat, "should match");
2489   } else {
2490     bt = alias_type->basic_type();
2491   }
2492 
2493   if (bt != T_ILLEGAL) {
2494     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2495     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2496       // Alias type doesn't differentiate between byte[] and boolean[].
2497       // Use address type to get the element type.
2498       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2499     }
2500     if (is_reference_type(bt, true)) {
2501       // accessing an array field with getReference is not a mismatch
2502       bt = T_OBJECT;
2503     }
2504     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2505       // Don't intrinsify mismatched object accesses
2506       set_map(old_map);
2507       set_sp(old_sp);
2508       return false;
2509     }
2510     mismatched = (bt != type);
2511   } else if (alias_type->adr_type()->isa_oopptr()) {
2512     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2513   }
2514 
2515   if (is_flat) {
2516     if (adr_type->isa_instptr()) {
2517       if (field == nullptr || field->type() != inline_klass) {
2518         mismatched = true;
2519       }
2520     } else if (adr_type->isa_aryptr()) {
2521       const Type* elem = adr_type->is_aryptr()->elem();
2522       if (!adr_type->is_flat() || elem->inline_klass() != inline_klass) {
2523         mismatched = true;
2524       }
2525     } else {
2526       mismatched = true;
2527     }
2528     if (is_store) {
2529       const Type* val_t = _gvn.type(val);
2530       if (!val_t->is_inlinetypeptr() || val_t->inline_klass() != inline_klass) {
2531         set_map(old_map);
2532         set_sp(old_sp);
2533         return false;
2534       }
2535     }
2536   }
2537 
2538   destruct_map_clone(old_map);
2539   assert(!mismatched || is_flat || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2540 
2541   if (mismatched) {
2542     decorators |= C2_MISMATCHED;
2543   }
2544 
2545   // First guess at the value type.
2546   const Type *value_type = Type::get_const_basic_type(type);
2547 
2548   // Figure out the memory ordering.
2549   decorators |= mo_decorator_for_access_kind(kind);
2550 
2551   if (!is_store) {
2552     if (type == T_OBJECT && !is_flat) {
2553       const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2554       if (tjp != nullptr) {
2555         value_type = tjp;
2556       }
2557     }
2558   }
2559 
2560   receiver = null_check(receiver);
2561   if (stopped()) {
2562     return true;
2563   }
2564   // Heap pointers get a null-check from the interpreter,
2565   // as a courtesy.  However, this is not guaranteed by Unsafe,
2566   // and it is not possible to fully distinguish unintended nulls
2567   // from intended ones in this API.
2568 
2569   if (!is_store) {
2570     Node* p = nullptr;
2571     // Try to constant fold a load from a constant field
2572 
2573     if (heap_base_oop != top() && field != nullptr && field->is_constant() && !field->is_flat() && !mismatched) {
2574       // final or stable field
2575       p = make_constant_from_field(field, heap_base_oop);
2576     }
2577 
2578     if (p == nullptr) { // Could not constant fold the load
2579       if (is_flat) {
2580         if (adr_type->isa_instptr() && !mismatched) {
2581           ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2582           int offset = adr_type->is_instptr()->offset();
2583           p = InlineTypeNode::make_from_flat(this, inline_klass, base, base, holder, offset, decorators);
2584         } else {
2585           p = InlineTypeNode::make_from_flat(this, inline_klass, base, adr, nullptr, 0, decorators);
2586         }
2587       } else {
2588         p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2589         const TypeOopPtr* ptr = value_type->make_oopptr();
2590         if (ptr != nullptr && ptr->is_inlinetypeptr()) {
2591           // Load a non-flattened inline type from memory
2592           p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass(), !ptr->maybe_null());
2593         }
2594       }
2595       // Normalize the value returned by getBoolean in the following cases
2596       if (type == T_BOOLEAN &&
2597           (mismatched ||
2598            heap_base_oop == top() ||                  // - heap_base_oop is null or
2599            (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2600                                                       //   and the unsafe access is made to large offset
2601                                                       //   (i.e., larger than the maximum offset necessary for any
2602                                                       //   field access)
2603             ) {
2604           IdealKit ideal = IdealKit(this);
2605 #define __ ideal.
2606           IdealVariable normalized_result(ideal);
2607           __ declarations_done();
2608           __ set(normalized_result, p);
2609           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2610           __ set(normalized_result, ideal.ConI(1));
2611           ideal.end_if();
2612           final_sync(ideal);
2613           p = __ value(normalized_result);
2614 #undef __
2615       }
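           // [Editorial note, illustrative only] The IdealKit block above simply canonicalizes the loaded
           // byte for getBoolean: p = (p != 0) ? 1 : 0, so a mismatched or off-heap boolean read never
           // exposes raw byte values other than 0 or 1.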
2616     }
2617     if (type == T_ADDRESS) {
2618       p = gvn().transform(new CastP2XNode(nullptr, p));
2619       p = ConvX2UL(p);
2620     }
2621     // The load node has the control of the preceding MemBarCPUOrder.  All
2622     // following nodes will have the control of the MemBarCPUOrder inserted at
2623     // the end of this method.  So, pushing the load onto the stack at a later
2624     // point is fine.
2625     set_result(p);
2626   } else {
2627     if (bt == T_ADDRESS) {
2628       // Repackage the long as a pointer.
2629       val = ConvL2X(val);
2630       val = gvn().transform(new CastX2PNode(val));
2631     }
2632     if (is_flat) {
2633       if (adr_type->isa_instptr() && !mismatched) {
2634         ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2635         int offset = adr_type->is_instptr()->offset();
2636         val->as_InlineType()->store_flat(this, base, base, holder, offset, decorators);
2637       } else {
2638         val->as_InlineType()->store_flat(this, base, adr, nullptr, 0, decorators);
2639       }
2640     } else {
2641       access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2642     }
2643   }
2644 
2645   if (argument(1)->is_InlineType() && is_store) {
2646     InlineTypeNode* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(argument(1))->inline_klass());
2647     value = value->make_larval(this, false);
2648     replace_in_map(argument(1), value);
2649   }
2650 
2651   return true;
2652 }
2653 
2654 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2655   Node* receiver = argument(0);
2656   Node* value = argument(1);
2657   if (!value->is_InlineType()) {
2658     return false;
2659   }
2660 
2661   receiver = null_check(receiver);
2662   if (stopped()) {
2663     return true;
2664   }
2665 
2666   set_result(value->as_InlineType()->make_larval(this, true));
2667   return true;
2668 }
2669 
2670 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2671   Node* receiver = argument(0);
2672   Node* buffer = argument(1);
2673   if (!buffer->is_InlineType()) {
2674     return false;
2675   }
2676   InlineTypeNode* vt = buffer->as_InlineType();
2677   if (!vt->is_allocated(&_gvn)) {
2678     return false;
2679   }
2680   // TODO 8239003 Why is this needed?
2681   if (AllocateNode::Ideal_allocation(vt->get_oop()) == nullptr) {
2682     return false;
2683   }
2684 
2685   receiver = null_check(receiver);
2686   if (stopped()) {
2687     return true;
2688   }
2689 
2690   set_result(vt->finish_larval(this));
2691   return true;
2692 }
2693 
2694 //----------------------------inline_unsafe_load_store----------------------------
2695 // This method serves a couple of different customers (depending on LoadStoreKind):
2696 //
2697 // LS_cmp_swap:
2698 //
2699 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2700 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2701 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2702 //
2703 // LS_cmp_swap_weak:
2704 //
2705 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2706 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2707 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2708 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2709 //
2710 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);
2711 //   boolean weakCompareAndSetIntPlain(     Object o, long offset, int    expected, int    x);
2712 //   boolean weakCompareAndSetIntAcquire(   Object o, long offset, int    expected, int    x);
2713 //   boolean weakCompareAndSetIntRelease(   Object o, long offset, int    expected, int    x);

2879     }
2880     case LS_cmp_swap:
2881     case LS_cmp_swap_weak:
2882     case LS_get_add:
2883       break;
2884     default:
2885       ShouldNotReachHere();
2886   }
2887 
2888   // Null check receiver.
2889   receiver = null_check(receiver);
2890   if (stopped()) {
2891     return true;
2892   }
2893 
2894   int alias_idx = C->get_alias_index(adr_type);
2895 
2896   if (is_reference_type(type)) {
2897     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2898 
2899     if (oldval != nullptr && oldval->is_InlineType()) {
2900       // Re-execute the unsafe access if allocation triggers deoptimization.
2901       PreserveReexecuteState preexecs(this);
2902       jvms()->set_should_reexecute(true);
2903       oldval = oldval->as_InlineType()->buffer(this)->get_oop();
2904     }
2905     if (newval != nullptr && newval->is_InlineType()) {
2906       // Re-execute the unsafe access if allocation triggers deoptimization.
2907       PreserveReexecuteState preexecs(this);
2908       jvms()->set_should_reexecute(true);
2909       newval = newval->as_InlineType()->buffer(this)->get_oop();
2910     }
2911 
2912     // Transformation of a value which could be a null pointer (CastPP #null)
2913     // could be delayed during Parse (for example, in adjust_map_after_if()).
2914     // Execute the transformation here to avoid barrier generation in such a case.
2915     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2916       newval = _gvn.makecon(TypePtr::NULL_PTR);
2917 
2918     if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2919       // Refine the value to a null constant, when it is known to be null
2920       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2921     }
2922   }
2923 
2924   Node* result = nullptr;
2925   switch (kind) {
2926     case LS_cmp_exchange: {
2927       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2928                                             oldval, newval, value_type, type, decorators);
2929       break;
2930     }
2931     case LS_cmp_swap_weak:

3078                     Deoptimization::Action_make_not_entrant);
3079     }
3080     if (stopped()) {
3081       return true;
3082     }
3083 #endif //INCLUDE_JVMTI
3084 
3085   Node* test = nullptr;
3086   if (LibraryCallKit::klass_needs_init_guard(kls)) {
3087     // Note:  The argument might still be an illegal value like
3088     // Serializable.class or Object[].class.   The runtime will handle it.
3089     // But we must make an explicit check for initialization.
3090     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3091     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3092     // can generate code to load it as an unsigned byte.
3093     Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
3094     Node* bits = intcon(InstanceKlass::fully_initialized);
3095     test = _gvn.transform(new SubINode(inst, bits));
3096     // The 'test' is non-zero if we need to take a slow path.
3097   }
3098   Node* obj = nullptr;
3099   const TypeInstKlassPtr* tkls = _gvn.type(kls)->isa_instklassptr();
3100   if (tkls != nullptr && tkls->instance_klass()->is_inlinetype()) {
3101     obj = InlineTypeNode::make_default(_gvn, tkls->instance_klass()->as_inline_klass())->buffer(this);
3102   } else {
3103     obj = new_instance(kls, test);
3104   }
3105   set_result(obj);
3106   return true;
3107 }
3108 
3109 //------------------------inline_native_time_funcs--------------
3110 // inline code for System.currentTimeMillis() and System.nanoTime()
3111 // these have the same type and signature
3112 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3113   const TypeFunc* tf = OptoRuntime::void_long_Type();
3114   const TypePtr* no_memory_effects = nullptr;
3115   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3116   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3117 #ifdef ASSERT
3118   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3119   assert(value_top == top(), "second value must be top");
3120 #endif
3121   set_result(value);
3122   return true;
3123 }
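
// For orientation (a sketch; the dispatch lives elsewhere in this file): the
// callers pass the address of the matching leaf runtime function, e.g.
//
//   inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
//   inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos),  "nanoTime");
//
// so the intrinsic reduces to a single RC_LEAF call whose 64-bit result is the
// projection at TypeFunc::Parms (the second half is always top, as asserted above).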
3124 

3860 
3861 //------------------------inline_native_setCurrentThread------------------
3862 bool LibraryCallKit::inline_native_setCurrentThread() {
3863   assert(C->method()->changes_current_thread(),
3864          "method changes current Thread but is not annotated ChangesCurrentThread");
3865   Node* arr = argument(1);
3866   Node* thread = _gvn.transform(new ThreadLocalNode());
3867   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3868   Node* thread_obj_handle
3869     = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3870   thread_obj_handle = _gvn.transform(thread_obj_handle);
3871   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3872   access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3873   JFR_ONLY(extend_setCurrentThread(thread, arr);)
3874   return true;
3875 }
3876 
3877 const Type* LibraryCallKit::scopedValueCache_type() {
3878   ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3879   const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3880   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS, /* stable= */ false, /* flat= */ false, /* not_flat= */ true, /* not_null_free= */ true);
3881 
3882   // Because we create the scopedValue cache lazily, we have to make the
3883   // type of the result BotPTR.
3884   bool xk = etype->klass_is_exact();
3885   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, TypeAryPtr::Offset(0));
3886   return objects_type;
3887 }
3888 
3889 Node* LibraryCallKit::scopedValueCache_helper() {
3890   Node* thread = _gvn.transform(new ThreadLocalNode());
3891   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3892   // We cannot use immutable_memory() because we might flip onto a
3893   // different carrier thread, at which point we'll need to use that
3894   // carrier thread's cache.
3895   // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3896   //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3897   return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3898 }
3899 
3900 //------------------------inline_native_scopedValueCache------------------
3901 bool LibraryCallKit::inline_native_scopedValueCache() {
3902   Node* cache_obj_handle = scopedValueCache_helper();
3903   const Type* objects_type = scopedValueCache_type();
3904   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3905 
3906   return true;
3907 }
3908 
3909 //------------------------inline_native_setScopedValueCache------------------
3910 bool LibraryCallKit::inline_native_setScopedValueCache() {
3911   Node* arr = argument(0);
3912   Node* cache_obj_handle = scopedValueCache_helper();
3913   const Type* objects_type = scopedValueCache_type();
3914 
3915   const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3916   access_store_at(nullptr, cache_obj_handle, adr_type, arr, objects_type, T_OBJECT, IN_NATIVE | MO_UNORDERED);
3917 
3918   return true;
3919 }
3920 









3921 //-----------------------load_klass_from_mirror_common-------------------------
3922 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3923 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3924 // and branch to the given path on the region.
3925 // If never_see_null, take an uncommon trap on null, so we can optimistically
3926 // compile for the non-null case.
3927 // If the region is null, force never_see_null = true.
3928 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3929                                                     bool never_see_null,
3930                                                     RegionNode* region,
3931                                                     int null_path,
3932                                                     int offset) {
3933   if (region == nullptr)  never_see_null = true;
3934   Node* p = basic_plus_adr(mirror, offset);
3935   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3936   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3937   Node* null_ctl = top();
3938   kls = null_check_oop(kls, &null_ctl, never_see_null);
3939   if (region != nullptr) {
3940     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

3943     assert(null_ctl == top(), "no loose ends");
3944   }
3945   return kls;
3946 }
3947 
3948 //--------------------(inline_native_Class_query helpers)---------------------
3949 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3950 // Fall through if (mods & mask) == bits, take the guard otherwise.
3951 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3952   // Branch around if the given klass has the given modifier bit set.
3953   // Like generate_guard, adds a new path onto the region.
3954   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3955   Node* mods = make_load(nullptr, modp, TypeInt::INT, T_INT, MemNode::unordered);
3956   Node* mask = intcon(modifier_mask);
3957   Node* bits = intcon(modifier_bits);
3958   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3959   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3960   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3961   return generate_fair_guard(bol, region);
3962 }
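
// Worked example (informational): generate_interface_guard() below passes
// modifier_mask == JVM_ACC_INTERFACE and modifier_bits == 0, so control falls
// through when (access_flags & JVM_ACC_INTERFACE) == 0 (an ordinary class) and
// the new region path is taken when the klass is an interface.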
3963 
3964 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3965   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3966 }
3967 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3968   return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3969 }
3970 
3971 //-------------------------inline_native_Class_query-------------------
3972 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3973   const Type* return_type = TypeInt::BOOL;
3974   Node* prim_return_value = top();  // what happens if it's a primitive class?
3975   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3976   bool expect_prim = false;     // most of these guys expect to work on refs
3977 
3978   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3979 
3980   Node* mirror = argument(0);
3981   Node* obj    = top();
3982 
3983   switch (id) {

4137 
4138   case vmIntrinsics::_getClassAccessFlags:
4139     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
4140     query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
4141     break;
4142 
4143   default:
4144     fatal_unexpected_iid(id);
4145     break;
4146   }
4147 
4148   // Fall-through is the normal case of a query to a real class.
4149   phi->init_req(1, query_value);
4150   region->init_req(1, control());
4151 
4152   C->set_has_split_ifs(true); // Has chance for split-if optimization
4153   set_result(region, phi);
4154   return true;
4155 }
4156 
4157 
4158 //-------------------------inline_Class_cast-------------------
4159 bool LibraryCallKit::inline_Class_cast() {
4160   Node* mirror = argument(0); // Class
4161   Node* obj    = argument(1);
4162   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4163   if (mirror_con == nullptr) {
4164     return false;  // dead path (mirror->is_top()).
4165   }
4166   if (obj == nullptr || obj->is_top()) {
4167     return false;  // dead path
4168   }
4169   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4170 
4171   // First, see if Class.cast() can be folded statically.
4172   // java_mirror_type() returns non-null for compile-time Class constants.
4173   bool is_null_free_array = false;
4174   ciType* tm = mirror_con->java_mirror_type(&is_null_free_array);
4175   if (tm != nullptr && tm->is_klass() &&
4176       tp != nullptr) {
4177     if (!tp->is_loaded()) {
4178       // Don't use intrinsic when class is not loaded.
4179       return false;
4180     } else {
4181       const TypeKlassPtr* tklass = TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces);
4182       if (is_null_free_array) {
4183         tklass = tklass->is_aryklassptr()->cast_to_null_free();
4184       }
4185       int static_res = C->static_subtype_check(tklass, tp->as_klass_type());
4186       if (static_res == Compile::SSC_always_true) {
4187         // isInstance() is true - fold the code.
4188         set_result(obj);
4189         return true;
4190       } else if (static_res == Compile::SSC_always_false) {
4191         // Don't use intrinsic, have to throw ClassCastException.
4192         // If the reference is null, the non-intrinsic bytecode will
4193         // be optimized appropriately.
4194         return false;
4195       }
4196     }
4197   }
4198 
4199   // Bailout intrinsic and do normal inlining if exception path is frequent.
4200   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4201     return false;
4202   }
4203 
4204   // Generate dynamic checks.
4205   // Class.cast() is java implementation of _checkcast bytecode.
4206   // Do checkcast (Parse::do_checkcast()) optimizations here.
4207 
4208   mirror = null_check(mirror);
4209   // If mirror is dead, only null-path is taken.
4210   if (stopped()) {
4211     return true;
4212   }
4213 
4214   // Not-subtype or the mirror's klass ptr is nullptr (in case it is a primitive).
4215   enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
4216   RegionNode* region = new RegionNode(PATH_LIMIT);
4217   record_for_igvn(region);
4218 
4219   // Now load the mirror's klass metaobject, and null-check it.
4220   // If kls is null, we have a primitive mirror and
4221   // nothing is an instance of a primitive type.
4222   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4223 
4224   Node* res = top();
4225   Node* io = i_o();
4226   Node* mem = merged_memory();
4227   if (!stopped()) {
4228 
4229     Node* bad_type_ctrl = top();
4230     // Do checkcast optimizations.
4231     res = gen_checkcast(obj, kls, &bad_type_ctrl);
4232     region->init_req(_bad_type_path, bad_type_ctrl);
4233   }
4234   if (region->in(_prim_path) != top() ||
4235       region->in(_bad_type_path) != top() ||
4236       region->in(_npe_path) != top()) {
4237     // Let Interpreter throw ClassCastException.
4238     PreserveJVMState pjvms(this);
4239     set_control(_gvn.transform(region));
4240     // Set IO and memory because gen_checkcast may override them when buffering inline types
4241     set_i_o(io);
4242     set_all_memory(mem);
4243     uncommon_trap(Deoptimization::Reason_intrinsic,
4244                   Deoptimization::Action_maybe_recompile);
4245   }
4246   if (!stopped()) {
4247     set_result(res);
4248   }
4249   return true;
4250 }
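
// Illustrative only (paraphrased, not taken from this file): the Java method
// being intrinsified behaves roughly like
//
//   public T cast(Object obj) {
//       if (obj != null && !isInstance(obj))
//           throw new ClassCastException();   // detail message elided
//       return (T) obj;
//   }
//
// The static fold above handles the case where both the mirror and the object
// type are compile-time constants; the dynamic path mirrors a checkcast.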
4251 
4252 
4253 //--------------------------inline_native_subtype_check------------------------
4254 // This intrinsic takes the JNI calls out of the heart of
4255 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4256 bool LibraryCallKit::inline_native_subtype_check() {
4257   // Pull both arguments off the stack.
4258   Node* args[2];                // two java.lang.Class mirrors: superc, subc
4259   args[0] = argument(0);
4260   args[1] = argument(1);
4261   Node* klasses[2];             // corresponding Klasses: superk, subk
4262   klasses[0] = klasses[1] = top();
4263 
4264   enum {
4265     // A full decision tree on {superc is prim, subc is prim}:
4266     _prim_0_path = 1,           // {P,N} => false
4267                                 // {P,P} & superc!=subc => false
4268     _prim_same_path,            // {P,P} & superc==subc => true
4269     _prim_1_path,               // {N,P} => false
4270     _ref_subtype_path,          // {N,N} & subtype check wins => true
4271     _both_ref_path,             // {N,N} & subtype check loses => false
4272     PATH_LIMIT
4273   };
4274 
4275   RegionNode* region = new RegionNode(PATH_LIMIT);
4276   RegionNode* prim_region = new RegionNode(2);
4277   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
4278   record_for_igvn(region);
4279   record_for_igvn(prim_region);
4280 
4281   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
4282   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4283   int class_klass_offset = java_lang_Class::klass_offset();
4284 
4285   // First null-check both mirrors and load each mirror's klass metaobject.
4286   int which_arg;
4287   for (which_arg = 0; which_arg <= 1; which_arg++) {
4288     Node* arg = args[which_arg];
4289     arg = null_check(arg);
4290     if (stopped())  break;
4291     args[which_arg] = arg;
4292 
4293     Node* p = basic_plus_adr(arg, class_klass_offset);
4294     Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
4295     klasses[which_arg] = _gvn.transform(kls);
4296   }
4297 
4298   // Having loaded both klasses, test each for null.
4299   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4300   for (which_arg = 0; which_arg <= 1; which_arg++) {
4301     Node* kls = klasses[which_arg];
4302     Node* null_ctl = top();
4303     kls = null_check_oop(kls, &null_ctl, never_see_null);
4304     if (which_arg == 0) {
4305       prim_region->init_req(1, null_ctl);
4306     } else {
4307       region->init_req(_prim_1_path, null_ctl);
4308     }
4309     if (stopped())  break;
4310     klasses[which_arg] = kls;
4311   }
4312 
4313   if (!stopped()) {
4314     // now we have two reference types, in klasses[0..1]
4315     Node* subk   = klasses[1];  // the argument to isAssignableFrom
4316     Node* superk = klasses[0];  // the receiver
4317     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));

4318     region->set_req(_ref_subtype_path, control());
4319   }
4320 
4321   // If both operands are primitive (both klasses null), then
4322   // we must return true when they are identical primitives.
4323   // It is convenient to test this after the first null klass check.
4324   // This path is also used if superc is a value mirror.
4325   set_control(_gvn.transform(prim_region));
4326   if (!stopped()) {
4327     // Since superc is primitive, make a guard for the superc==subc case.
4328     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4329     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4330     generate_fair_guard(bol_eq, region);
4331     if (region->req() == PATH_LIMIT+1) {
4332       // A guard was added.  If the added guard is taken, superc==subc.
4333       region->swap_edges(PATH_LIMIT, _prim_same_path);
4334       region->del_req(PATH_LIMIT);
4335     }
4336     region->set_req(_prim_0_path, control()); // Not equal after all.
4337   }
4338 
4339   // these are the only paths that produce 'true':
4340   phi->set_req(_prim_same_path,   intcon(1));
4341   phi->set_req(_ref_subtype_path, intcon(1));
4342 
4343   // pull together the cases:
4344   assert(region->req() == PATH_LIMIT, "sane region");
4345   for (uint i = 1; i < region->req(); i++) {
4346     Node* ctl = region->in(i);
4347     if (ctl == nullptr || ctl == top()) {
4348       region->set_req(i, top());
4349       phi   ->set_req(i, top());
4350     } else if (phi->in(i) == nullptr) {
4351       phi->set_req(i, intcon(0)); // all other paths produce 'false'
4352     }
4353   }
4354 
4355   set_control(_gvn.transform(region));
4356   set_result(_gvn.transform(phi));
4357   return true;
4358 }
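
// Decision-tree examples (informational, standard isAssignableFrom semantics):
//
//   int.class.isAssignableFrom(long.class)        // {P,P}, superc != subc -> false
//   int.class.isAssignableFrom(int.class)         // {P,P}, superc == subc -> true
//   Object.class.isAssignableFrom(int.class)      // {N,P}                 -> false
//   Number.class.isAssignableFrom(Integer.class)  // {N,N}, subtype wins   -> true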
4359 
4360 //---------------------generate_array_guard_common------------------------
4361 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {

4362 
4363   if (stopped()) {
4364     return nullptr;
4365   }
4366 









4367   // Like generate_guard, adds a new path onto the region.
4368   jint  layout_con = 0;
4369   Node* layout_val = get_layout_helper(kls, layout_con);
4370   if (layout_val == nullptr) {
4371     bool query = false;
4372     switch(kind) {
4373       case ObjectArray:    query = Klass::layout_helper_is_objArray(layout_con); break;
4374       case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
4375       case TypeArray:      query = Klass::layout_helper_is_typeArray(layout_con); break;
4376       case AnyArray:       query = Klass::layout_helper_is_array(layout_con); break;
4377       case NonArray:       query = !Klass::layout_helper_is_array(layout_con); break;
4378       default:
4379         ShouldNotReachHere();
4380     }
4381     if (!query) {
4382       return nullptr;                       // never a branch
4383     } else {                             // always a branch
4384       Node* always_branch = control();
4385       if (region != nullptr)
4386         region->add_req(always_branch);
4387       set_control(top());
4388       return always_branch;
4389     }
4390   }
4391   unsigned int value = 0;
4392   BoolTest::mask btest = BoolTest::illegal;
4393   switch(kind) {
4394     case ObjectArray:
4395     case NonObjectArray: {
4396       value = Klass::_lh_array_tag_obj_value;
4397       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4398       btest = (kind == ObjectArray) ? BoolTest::eq : BoolTest::ne;
4399       break;
4400     }
4401     case TypeArray: {
4402       value = Klass::_lh_array_tag_type_value;
4403       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4404       btest = BoolTest::eq;
4405       break;
4406     }
4407     case AnyArray:    value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
4408     case NonArray:    value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
4409     default:
4410       ShouldNotReachHere();
4411   }
4412   // Now test the correct condition.
4413   jint nval = (jint)value;



4414   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));



4415   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4416   return generate_fair_guard(bol, region);
4417 }
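
// Background for the guard above (a summary of the Klass::_layout_helper
// encoding, see klass.hpp): arrays have a negative layout helper with the array
// tag in the high bits, instances have a positive one, and _lh_neutral_value
// sits between them. AnyArray/NonArray therefore reduce to a signed compare
// against the neutral value, while ObjectArray/TypeArray shift the tag down by
// _lh_array_tag_shift and compare it to the obj/type tag values.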
4418 
4419 //-----------------------inline_newNullRestrictedArray--------------------------
4420 // public static native Object[] newNullRestrictedArray(Class<?> componentType, int length);
4421 bool LibraryCallKit::inline_newNullRestrictedArray() {
4422   Node* componentType = argument(0);
4423   Node* length = argument(1);
4424 
4425   const TypeInstPtr* tp = _gvn.type(componentType)->isa_instptr();
4426   if (tp != nullptr) {
4427     ciInstanceKlass* ik = tp->instance_klass();
4428     if (ik == C->env()->Class_klass()) {
4429       ciType* t = tp->java_mirror_type();
4430       if (t != nullptr && t->is_inlinetype()) {
4431         ciArrayKlass* array_klass = ciArrayKlass::make(t, true);
4432         if (array_klass->is_loaded() && array_klass->element_klass()->as_inline_klass()->is_initialized()) {
4433           const TypeAryKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass, Type::trust_interfaces)->is_aryklassptr();
4434           array_klass_type = array_klass_type->cast_to_null_free();
4435           Node* obj = new_array(makecon(array_klass_type), length, 0, nullptr, false);  // no arguments to push
4436           set_result(obj);
4437           assert(gvn().type(obj)->is_aryptr()->is_null_free(), "must be null-free");
4438           return true;
4439         }
4440       }
4441     }
4442   }
4443   return false;
4444 }
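
// Illustrative call shape (the value class name is a placeholder):
//
//   Point[] pts = (Point[]) newNullRestrictedArray(Point.class, 16);
//
// The returned array is null-free: each element starts out as the default
// instance of the value class rather than null, which is why the fast path
// above requires the element class to be loaded and initialized.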
4445 
4446 //-----------------------inline_native_newArray--------------------------
4447 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4448 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4449 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4450   Node* mirror;
4451   Node* count_val;
4452   if (uninitialized) {
4453     null_check_receiver();
4454     mirror    = argument(1);
4455     count_val = argument(2);
4456   } else {
4457     mirror    = argument(0);
4458     count_val = argument(1);
4459   }
4460 
4461   mirror = null_check(mirror);
4462   // If mirror or obj is dead, only null-path is taken.
4463   if (stopped())  return true;
4464 
4465   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4466   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4467   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);

4573   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4574   { PreserveReexecuteState preexecs(this);
4575     jvms()->set_should_reexecute(true);
4576 
4577     array_type_mirror = null_check(array_type_mirror);
4578     original          = null_check(original);
4579 
4580     // Check if a null path was taken unconditionally.
4581     if (stopped())  return true;
4582 
4583     Node* orig_length = load_array_length(original);
4584 
4585     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4586     klass_node = null_check(klass_node);
4587 
4588     RegionNode* bailout = new RegionNode(1);
4589     record_for_igvn(bailout);
4590 
4591     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4592     // Bail out if that is so.
4593     // Inline type array may have object field that would require a
4594     // write barrier. Conservatively, go to slow path.
4595     // TODO 8251971: Optimize for the case when flat src/dst are later found
4596     // to not contain oops (i.e., move this check to the macro expansion phase).
4597     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4598     const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
4599     const TypeKlassPtr* tklass = _gvn.type(klass_node)->is_klassptr();
4600     bool exclude_flat = UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
4601                         // Can src array be flat and contain oops?
4602                         (orig_t == nullptr || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
4603                         // Can dest array be flat and contain oops?
4604                         tklass->can_be_inline_array() && (!tklass->is_flat() || tklass->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->as_inline_klass()->contains_oops());
4605     Node* not_objArray = exclude_flat ? generate_non_objArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
4606     if (not_objArray != nullptr) {
4607       // Improve the klass node's type from the new optimistic assumption:
4608       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4609       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
4610       Node* cast = new CastPPNode(control(), klass_node, akls);
4611       klass_node = _gvn.transform(cast);
4612     }
4613 
4614     // Bail out if either start or end is negative.
4615     generate_negative_guard(start, bailout, &start);
4616     generate_negative_guard(end,   bailout, &end);
4617 
4618     Node* length = end;
4619     if (_gvn.type(start) != TypeInt::ZERO) {
4620       length = _gvn.transform(new SubINode(end, start));
4621     }
4622 
4623     // Bail out if length is negative (i.e., if start > end).
4624     // Without this, new_array would throw
4625     // NegativeArraySizeException, but IllegalArgumentException is what
4626     // should be thrown.
4627     generate_negative_guard(length, bailout, &length);
4628 
4629     // Handle inline type arrays
4630     bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
4631     if (!stopped()) {
4632       // TODO JDK-8329224
4633       if (!orig_t->is_null_free()) {
4634         // Not statically known to be null free, add a check
4635         generate_fair_guard(null_free_array_test(original), bailout);
4636       }
4637       orig_t = _gvn.type(original)->isa_aryptr();
4638       if (orig_t != nullptr && orig_t->is_flat()) {
4639         // Src is flat, check that dest is flat as well
4640         if (exclude_flat) {
4641           // Dest can't be flat, bail out
4642           bailout->add_req(control());
4643           set_control(top());
4644         } else {
4645           generate_fair_guard(flat_array_test(klass_node, /* flat = */ false), bailout);
4646         }
4647       } else if (UseFlatArray && (orig_t == nullptr || !orig_t->is_not_flat()) &&
4648                  // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
4649                  ((!tklass->is_flat() && tklass->can_be_inline_array()) || !can_validate)) {
4650         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
4651         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
4652         generate_fair_guard(flat_array_test(load_object_klass(original)), bailout);
4653         if (orig_t != nullptr) {
4654           orig_t = orig_t->cast_to_not_flat();
4655           original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
4656         }
4657       }
4658       if (!can_validate) {
4659         // No validation. The subtype check emitted at macro expansion time will not go to the slow
4660         // path but call checkcast_arraycopy, which cannot handle flat/null-free inline type arrays.
4661         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
4662         generate_fair_guard(flat_array_test(klass_node), bailout);
4663         generate_fair_guard(null_free_array_test(original), bailout);
4664       }
4665     }
4666 
4667     // Bail out if start is larger than the original length
4668     Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4669     generate_negative_guard(orig_tail, bailout, &orig_tail);
4670 
4671     if (bailout->req() > 1) {
4672       PreserveJVMState pjvms(this);
4673       set_control(_gvn.transform(bailout));
4674       uncommon_trap(Deoptimization::Reason_intrinsic,
4675                     Deoptimization::Action_maybe_recompile);
4676     }
4677 
4678     if (!stopped()) {
4679       // How many elements will we copy from the original?
4680       // The answer is MinI(orig_tail, length).
4681       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4682 
4683       // Generate a direct call to the right arraycopy function(s).
4684       // We know the copy is disjoint but we might not know if the
4685       // oop stores need checking.
4686       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).

4692       // to the copyOf to be validated, including that the copy to the
4693       // new array won't trigger an ArrayStoreException. That subtype
4694       // check can be optimized if we know something on the type of
4695       // the input array from type speculation.
4696       if (_gvn.type(klass_node)->singleton()) {
4697         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4698         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4699 
4700         int test = C->static_subtype_check(superk, subk);
4701         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4702           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4703           if (t_original->speculative_type() != nullptr) {
4704             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4705           }
4706         }
4707       }
4708 
4709       bool validated = false;
4710       // Reason_class_check rather than Reason_intrinsic because we
4711       // want to intrinsify even if this traps.
4712       if (can_validate) {
4713         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4714 
4715         if (not_subtype_ctrl != top()) {
4716           PreserveJVMState pjvms(this);
4717           set_control(not_subtype_ctrl);
4718           uncommon_trap(Deoptimization::Reason_class_check,
4719                         Deoptimization::Action_make_not_entrant);
4720           assert(stopped(), "Should be stopped");
4721         }
4722         validated = true;
4723       }
4724 
4725       if (!stopped()) {
4726         newcopy = new_array(klass_node, length, 0);  // no arguments to push
4727 
4728         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
4729                                                 load_object_klass(original), klass_node);
4730         if (!is_copyOfRange) {
4731           ac->set_copyof(validated);
4732         } else {

4778 
4779 //-----------------------generate_method_call----------------------------
4780 // Use generate_method_call to make a slow-call to the real
4781 // method if the fast path fails.  An alternative would be to
4782 // use a stub like OptoRuntime::slow_arraycopy_Java.
4783 // This only works for expanding the current library call,
4784 // not another intrinsic.  (E.g., don't use this for making an
4785 // arraycopy call inside the copyOf intrinsic.)
4786 CallJavaNode*
4787 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4788   // When compiling the intrinsic method itself, do not use this technique.
4789   guarantee(callee() != C->method(), "cannot make slow-call to self");
4790 
4791   ciMethod* method = callee();
4792   // ensure the JVMS we have will be correct for this call
4793   guarantee(method_id == method->intrinsic_id(), "must match");
4794 
4795   const TypeFunc* tf = TypeFunc::make(method);
4796   if (res_not_null) {
4797     assert(tf->return_type() == T_OBJECT, "");
4798     const TypeTuple* range = tf->range_cc();
4799     const Type** fields = TypeTuple::fields(range->cnt());
4800     fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4801     const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4802     tf = TypeFunc::make(tf->domain_cc(), new_range);
4803   }
4804   CallJavaNode* slow_call;
4805   if (is_static) {
4806     assert(!is_virtual, "");
4807     slow_call = new CallStaticJavaNode(C, tf,
4808                            SharedRuntime::get_resolve_static_call_stub(), method);
4809   } else if (is_virtual) {
4810     assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4811     int vtable_index = Method::invalid_vtable_index;
4812     if (UseInlineCaches) {
4813       // Suppress the vtable call
4814     } else {
4815       // hashCode and clone are not miranda methods,
4816       // so the vtable index is fixed.
4817       // No need to use the linkResolver to get it.
4818        vtable_index = method->vtable_index();
4819        assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4820               "bad index %d", vtable_index);
4821     }
4822     slow_call = new CallDynamicJavaNode(tf,

4839   set_edges_for_java_call(slow_call);
4840   return slow_call;
4841 }
4842 
4843 
4844 /**
4845  * Build special case code for calls to hashCode on an object. This call may
4846  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4847  * slightly different code.
4848  */
4849 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4850   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4851   assert(!(is_virtual && is_static), "either virtual, special, or static");
4852 
4853   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4854 
4855   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4856   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4857   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4858   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4859   Node* obj = argument(0);
4860 
4861   // Don't intrinsify hashCode on inline types for now.
4862   // The "is locked" runtime check below also serves as an inline type check and goes to the slow path.
4863   if (gvn().type(obj)->is_inlinetypeptr()) {
4864     return false;
4865   }
4866 
4867   if (!is_static) {
4868     // Check for hashing null object
4869     obj = null_check_receiver();
4870     if (stopped())  return true;        // unconditionally null
4871     result_reg->init_req(_null_path, top());
4872     result_val->init_req(_null_path, top());
4873   } else {
4874     // Do a null check, and return zero if null.
4875     // System.identityHashCode(null) == 0

4876     Node* null_ctl = top();
4877     obj = null_check_oop(obj, &null_ctl);
4878     result_reg->init_req(_null_path, null_ctl);
4879     result_val->init_req(_null_path, _gvn.intcon(0));
4880   }
4881 
4882   // Unconditionally null?  Then return right away.
4883   if (stopped()) {
4884     set_control( result_reg->in(_null_path));
4885     if (!stopped())
4886       set_result(result_val->in(_null_path));
4887     return true;
4888   }
4889 
4890   // We only go to the fast case code if we pass a number of guards.  The
4891   // paths which do not pass are accumulated in the slow_region.
4892   RegionNode* slow_region = new RegionNode(1);
4893   record_for_igvn(slow_region);
4894 
4895   // If this is a virtual call, we generate a funny guard.  We pull out
4896   // the vtable entry corresponding to hashCode() from the target object.
4897   // If the target method which we are calling happens to be the native
4898   // Object hashCode() method, we pass the guard.  We do not need this
4899   // guard for non-virtual calls -- the caller is known to be the native
4900   // Object hashCode().
4901   if (is_virtual) {
4902     // After null check, get the object's klass.
4903     Node* obj_klass = load_object_klass(obj);
4904     generate_virtual_guard(obj_klass, slow_region);
4905   }
4906 
4907   // Get the header out of the object, use LoadMarkNode when available
4908   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4909   // The control of the load must be null. Otherwise, the load can move before
4910   // the null check after castPP removal.
4911   Node* no_ctrl = nullptr;
4912   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4913 
4914   // Test the header to see if it is safe to read w.r.t. locking.
4915   // This also serves as a guard against inline types.
4916   Node *lock_mask      = _gvn.MakeConX(markWord::inline_type_mask_in_place);
4917   Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4918   if (LockingMode == LM_LIGHTWEIGHT) {
4919     Node *monitor_val   = _gvn.MakeConX(markWord::monitor_value);
4920     Node *chk_monitor   = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4921     Node *test_monitor  = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4922 
4923     generate_slow_guard(test_monitor, slow_region);
4924   } else {
4925     Node *unlocked_val      = _gvn.MakeConX(markWord::unlocked_value);
4926     Node *chk_unlocked      = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
4927     Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
4928 
4929     generate_slow_guard(test_not_unlocked, slow_region);
4930   }
4931 
4932   // Get the hash value and check to see that it has been properly assigned.
4933   // We depend on hash_mask being at most 32 bits and avoid the use of
4934   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4935   // vm: see markWord.hpp.
4936   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);

4970     // this->control() comes from set_results_for_java_call
4971     result_reg->init_req(_slow_path, control());
4972     result_val->init_req(_slow_path, slow_result);
4973     result_io  ->set_req(_slow_path, i_o());
4974     result_mem ->set_req(_slow_path, reset_memory());
4975   }
4976 
4977   // Return the combined state.
4978   set_i_o(        _gvn.transform(result_io)  );
4979   set_all_memory( _gvn.transform(result_mem));
4980 
4981   set_result(result_reg, result_val);
4982   return true;
4983 }
4984 
4985 //---------------------------inline_native_getClass----------------------------
4986 // public final native Class<?> java.lang.Object.getClass();
4987 //
4988 // Build special case code for calls to getClass on an object.
4989 bool LibraryCallKit::inline_native_getClass() {
4990   Node* obj = argument(0);
4991   if (obj->is_InlineType()) {
4992     const Type* t = _gvn.type(obj);
4993     if (t->maybe_null()) {
4994       null_check(obj);
4995     }
4996     set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
4997     return true;
4998   }
4999   obj = null_check_receiver();
5000   if (stopped())  return true;
5001   set_result(load_mirror_from_klass(load_object_klass(obj)));
5002   return true;
5003 }
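
// Example of the inline-type fast path above (illustrative; the class name is
// a placeholder): if obj is statically known to be a value object of class
// Point, obj.getClass() folds to the constant Point.class mirror, and only a
// null check remains when the declared type is nullable.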
5004 
5005 //-----------------inline_native_Reflection_getCallerClass---------------------
5006 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
5007 //
5008 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
5009 //
5010 // NOTE: This code must perform the same logic as JVM_GetCallerClass
5011 // in that it must skip particular security frames and checks for
5012 // caller sensitive methods.
5013 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
5014 #ifndef PRODUCT
5015   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
5016     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
5017   }
5018 #endif
5019 

5331     dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5332 
5333     flags |= RC_NARROW_MEM; // narrow in memory
5334   }
5335 
5336   // Call it.  Note that the length argument is not scaled.
5337   make_runtime_call(flags,
5338                     OptoRuntime::make_setmemory_Type(),
5339                     StubRoutines::unsafe_setmemory(),
5340                     "unsafe_setmemory",
5341                     dst_type,
5342                     dst_addr, size XTOP, byte);
5343 
5344   store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
5345 
5346   return true;
5347 }
5348 
5349 #undef XTOP
5350 
5351 //----------------------inline_unsafe_isFlatArray------------------------
5352 // public native boolean Unsafe.isFlatArray(Class<?> arrayClass);
5353 // This intrinsic exploits assumptions made by the native implementation
5354 // (arrayClass is neither null nor primitive) to avoid unnecessary null checks.
5355 bool LibraryCallKit::inline_unsafe_isFlatArray() {
5356   Node* cls = argument(1);
5357   Node* p = basic_plus_adr(cls, java_lang_Class::klass_offset());
5358   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p,
5359                                                  TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT));
5360   Node* result = flat_array_test(kls);
5361   set_result(result);
5362   return true;
5363 }
5364 
5365 //------------------------copy_to_clone-----------------------------------
5366 // Helper function for inline_native_clone.
5367 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5368   assert(obj_size != nullptr, "");
5369   Node* raw_obj = alloc_obj->in(1);
5370   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5371 
5372   AllocateNode* alloc = nullptr;
5373   if (ReduceBulkZeroing &&
5374       // If we are implementing an array clone without knowing its source type
5375       // (can happen when compiling the array-guarded branch of a reflective
5376       // Object.clone() invocation), initialize the array within the allocation.
5377       // This is needed because some GCs (e.g. ZGC) might fall back in this case
5378       // to a runtime clone call that assumes fully initialized source arrays.
5379       (!is_array || obj->get_ptr_type()->isa_aryptr() != nullptr)) {
5380     // We will be completely responsible for initializing this object -
5381     // mark Initialize node as complete.
5382     alloc = AllocateNode::Ideal_allocation(alloc_obj);
5383     // The object was just allocated - there should not be any stores!
5384     guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");

5415 //  not cloneable or finalizer => slow path to out-of-line Object.clone
5416 //
5417 // The general case has two steps, allocation and copying.
5418 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5419 //
5420 // Copying also has two cases, oop arrays and everything else.
5421 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5422 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5423 //
5424 // These steps fold up nicely if and when the cloned object's klass
5425 // can be sharply typed as an object array, a type array, or an instance.
5426 //
5427 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5428   PhiNode* result_val;
5429 
5430   // Set the reexecute bit for the interpreter to reexecute
5431   // the bytecode that invokes Object.clone if deoptimization happens.
5432   { PreserveReexecuteState preexecs(this);
5433     jvms()->set_should_reexecute(true);
5434 
5435     Node* obj = argument(0);
5436     obj = null_check_receiver();
5437     if (stopped())  return true;
5438 
5439     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5440     if (obj_type->is_inlinetypeptr()) {
5441       // If the object to clone is an inline type, we can simply return it (i.e. a nop) since inline types have
5442       // no identity.
5443       set_result(obj);
5444       return true;
5445     }
5446 
5447     // If we are going to clone an instance, we need its exact type to
5448     // know the number and types of fields to convert the clone to
5449     // loads/stores. Maybe a speculative type can help us.
5450     if (!obj_type->klass_is_exact() &&
5451         obj_type->speculative_type() != nullptr &&
5452         obj_type->speculative_type()->is_instance_klass() &&
5453         !obj_type->speculative_type()->is_inlinetype()) {
5454       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5455       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5456           !spec_ik->has_injected_fields()) {
5457         if (!obj_type->isa_instptr() ||
5458             obj_type->is_instptr()->instance_klass()->has_subklass()) {
5459           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5460         }
5461       }
5462     }
5463 
5464     // Conservatively insert a memory barrier on all memory slices.
5465     // Do not let writes into the original float below the clone.
5466     insert_mem_bar(Op_MemBarCPUOrder);
5467 
5468     // paths into result_reg:
5469     enum {
5470       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
5471       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
5472       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
5473       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
5474       PATH_LIMIT
5475     };
5476     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5477     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5478     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
5479     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5480     record_for_igvn(result_reg);
5481 
5482     Node* obj_klass = load_object_klass(obj);
5483     // We only go to the fast case code if we pass a number of guards.
5484     // The paths which do not pass are accumulated in the slow_region.
5485     RegionNode* slow_region = new RegionNode(1);
5486     record_for_igvn(slow_region);
5487 
5488     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
5489     if (array_ctl != nullptr) {
5490       // It's an array.
5491       PreserveJVMState pjvms(this);
5492       set_control(array_ctl);



5493 
5494       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5495       const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
5496       if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
5497           obj_type->can_be_inline_array() &&
5498           (ary_ptr == nullptr || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
5499         // Flat inline type array may have object field that would require a
5500         // write barrier. Conservatively, go to slow path.
5501         generate_fair_guard(flat_array_test(obj_klass), slow_region);













5502       }







5503 
5504       if (!stopped()) {
5505         Node* obj_length = load_array_length(obj);
5506         Node* array_size = nullptr; // Size of the array without object alignment padding.
5507         Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5508 
5509         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5510         if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5511           // If it is an oop array, it requires very special treatment,
5512           // because gc barriers are required when accessing the array.
5513           Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5514           if (is_obja != nullptr) {
5515             PreserveJVMState pjvms2(this);
5516             set_control(is_obja);
5517             // Generate a direct call to the right arraycopy function(s).
5518             // Clones are always tightly coupled.
5519             ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5520             ac->set_clone_oop_array();
5521             Node* n = _gvn.transform(ac);
5522             assert(n == ac, "cannot disappear");
5523             ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5524 
5525             result_reg->init_req(_objArray_path, control());
5526             result_val->init_req(_objArray_path, alloc_obj);
5527             result_i_o ->set_req(_objArray_path, i_o());
5528             result_mem ->set_req(_objArray_path, reset_memory());
5529           }
5530         }
5531         // Otherwise, there are no barriers to worry about.
5532         // (We can dispense with card marks if we know the allocation
5533         //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
5534         //  causes the non-eden paths to take compensating steps to
5535         //  simulate a fresh allocation, so that no further
5536         //  card marks are required in compiled code to initialize
5537         //  the object.)
5538 
5539         if (!stopped()) {
5540           copy_to_clone(obj, alloc_obj, array_size, true);
5541 
5542           // Present the results of the copy.
5543           result_reg->init_req(_array_path, control());
5544           result_val->init_req(_array_path, alloc_obj);
5545           result_i_o ->set_req(_array_path, i_o());
5546           result_mem ->set_req(_array_path, reset_memory());
5547         }
5548       }
5549     }
5550 




5551     if (!stopped()) {
5552       // It's an instance (we did array above).  Make the slow-path tests.
5553       // If this is a virtual call, we generate a funny guard.  We grab
5554       // the vtable entry corresponding to clone() from the target object.
5555       // If the target method which we are calling happens to be the
5556       // Object clone() method, we pass the guard.  We do not need this
5557       // guard for non-virtual calls; the caller is known to be the native
5558       // Object clone().
5559       if (is_virtual) {
5560         generate_virtual_guard(obj_klass, slow_region);
5561       }
5562 
5563       // The object must be easily cloneable and must not have a finalizer.
5564       // Both of these conditions may be checked in a single test.
5565       // We could optimize the test further, but we don't care.
5566       generate_access_flags_guard(obj_klass,
5567                                   // Test both conditions:
5568                                   JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
5569                                   // Must be cloneable but not finalizer:
5570                                   JVM_ACC_IS_CLONEABLE_FAST,

5662         set_jvms(sfpt->jvms());
5663         _reexecute_sp = jvms()->sp();
5664 
5665         return saved_jvms;
5666       }
5667     }
5668   }
5669   return nullptr;
5670 }
5671 
5672 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5673 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5674 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5675   JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5676   uint size = alloc->req();
5677   SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5678   old_jvms->set_map(sfpt);
5679   for (uint i = 0; i < size; i++) {
5680     sfpt->init_req(i, alloc->in(i));
5681   }
5682   int adjustment = 1;
5683   const TypeAryKlassPtr* ary_klass_ptr = alloc->in(AllocateNode::KlassNode)->bottom_type()->is_aryklassptr();
5684   if (ary_klass_ptr->is_null_free()) {
5685     // A null-free, tightly coupled array allocation can only come from LibraryCallKit::inline_newNullRestrictedArray,
5686     // which requires both the component type and the array length on the stack for re-execution. Re-create and push
5687     // the component type.
5688     ciArrayKlass* klass = ary_klass_ptr->exact_klass()->as_array_klass();
5689     ciInstance* instance = klass->component_mirror_instance();
5690     const TypeInstPtr* t_instance = TypeInstPtr::make(instance);
5691     sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), makecon(t_instance));
5692     adjustment++;
5693   }
5694   // re-push array length for deoptimization
5695   sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment - 1, alloc->in(AllocateNode::ALength));
5696   old_jvms->set_sp(old_jvms->sp() + adjustment);
5697   old_jvms->set_monoff(old_jvms->monoff() + adjustment);
5698   old_jvms->set_scloff(old_jvms->scloff() + adjustment);
5699   old_jvms->set_endoff(old_jvms->endoff() + adjustment);
5700   old_jvms->set_should_reexecute(true);
5701 
5702   sfpt->set_i_o(map()->i_o());
5703   sfpt->set_memory(map()->memory());
5704   sfpt->set_control(map()->control());
5705   return sfpt;
5706 }
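
// Worked example of the adjustment above (informational): for a plain tightly
// coupled array allocation only the length is re-pushed, so adjustment == 1 and
// the interpreter re-executes with [..., length] on the expression stack. For a
// null-free allocation coming from inline_newNullRestrictedArray() the
// component-type mirror is re-pushed as well, so adjustment == 2 and the stack
// becomes [..., componentType, length].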
5707 
5708 // In case of a deoptimization, we restart execution at the
5709 // allocation, allocating a new array. We would leave an uninitialized
5710 // array in the heap that GCs wouldn't expect. Move the allocation
5711 // after the traps so we don't allocate the array if we
5712 // deoptimize. This is possible because tightly_coupled_allocation()
5713 // guarantees there's no observer of the allocated array at this point
5714 // and the control flow is simple enough.
5715 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5716                                                     int saved_reexecute_sp, uint new_idx) {
5717   if (saved_jvms_before_guards != nullptr && !stopped()) {
5718     replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5719 
5720     assert(alloc != nullptr, "only with a tightly coupled allocation");
5721     // restore JVM state to the state at the arraycopy
5722     saved_jvms_before_guards->map()->set_control(map()->control());
5723     assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5724     assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5725     // If we've improved the types of some nodes (null check) while
5726     // emitting the guards, propagate them to the current state
5727     map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5728     set_jvms(saved_jvms_before_guards);
5729     _reexecute_sp = saved_reexecute_sp;
5730 
5731     // Remove the allocation from above the guards
5732     CallProjections* callprojs = alloc->extract_projections(true);

5733     InitializeNode* init = alloc->initialization();
5734     Node* alloc_mem = alloc->in(TypeFunc::Memory);
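         // Redirect users of the allocation's fall-through I/O projection and of the Initialize
         // memory projection to the allocation's own inputs, so nothing above the guards observes
         // the allocation any more.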
5735     C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5736     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5737 
5738     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5739     // the allocation (i.e. it is only valid if the allocation succeeds):
5740     // 1) Replace the CastIINode with the AllocateArrayNode's length here.
5741     // 2) Create the CastIINode again once the allocation has moved (see below, at the end of this method).
5742     //
5743     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate a new,
5744     // separate CastIINode (arraycopy guard checks or any array length use between the array allocation and the arraycopy).
5745     Node* init_control = init->proj_out(TypeFunc::Control);
5746     Node* alloc_length = alloc->Ideal_length();
5747 #ifdef ASSERT
5748     Node* prev_cast = nullptr;
5749 #endif
5750     for (uint i = 0; i < init_control->outcnt(); i++) {
5751       Node* init_out = init_control->raw_out(i);
5752       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5753 #ifdef ASSERT
5754         if (prev_cast == nullptr) {
5755           prev_cast = init_out;

5757           if (prev_cast->cmp(*init_out) == false) {
5758             prev_cast->dump();
5759             init_out->dump();
5760             assert(false, "not equal CastIINode");
5761           }
5762         }
5763 #endif
5764         C->gvn_replace_by(init_out, alloc_length);
5765       }
5766     }
5767     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5768 
5769     // move the allocation here (after the guards)
5770     _gvn.hash_delete(alloc);
5771     alloc->set_req(TypeFunc::Control, control());
5772     alloc->set_req(TypeFunc::I_O, i_o());
5773     Node* mem = reset_memory();
5774     set_all_memory(mem);
5775     alloc->set_req(TypeFunc::Memory, mem);
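         // Compilation now continues after the moved allocation: control resumes at the Initialize
         // projection and I/O at the allocation's fall-through projection.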
5776     set_control(init->proj_out_or_null(TypeFunc::Control));
5777     set_i_o(callprojs->fallthrough_ioproj);
5778 
5779     // Update memory as done in GraphKit::set_output_for_allocation()
5780     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5781     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5782     if (ary_type->isa_aryptr() && length_type != nullptr) {
5783       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5784     }
5785     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5786     int            elemidx  = C->get_alias_index(telemref);
5787     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5788     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5789 
5790     Node* allocx = _gvn.transform(alloc);
5791     assert(allocx == alloc, "where has the allocation gone?");
5792     assert(dest->is_CheckCastPP(), "not an allocation result?");
5793 
5794     _gvn.hash_delete(dest);
5795     dest->set_req(0, control());
5796     Node* destx = _gvn.transform(dest);
5797     assert(destx == dest, "where has the allocation result gone?");

6067         top_src  = src_type->isa_aryptr();
6068         has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
6069         src_spec = true;
6070       }
6071       if (!has_dest) {
6072         dest = maybe_cast_profiled_obj(dest, dest_k, true);
6073         dest_type  = _gvn.type(dest);
6074         top_dest  = dest_type->isa_aryptr();
6075         has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
6076         dest_spec = true;
6077       }
6078     }
6079   }
6080 
6081   if (has_src && has_dest && can_emit_guards) {
6082     BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
6083     BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
6084     if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
6085     if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
6086 
6087     if (src_elem == dest_elem && top_src->is_flat() == top_dest->is_flat() && src_elem == T_OBJECT) {
6088       // If both arrays are object arrays then having the exact types
6089       // for both will remove the need for a subtype check at runtime
6090       // before the call and may make it possible to pick a faster copy
6091       // routine (without a subtype check on every element)
6092       // Do we have the exact type of src?
6093       bool could_have_src = src_spec;
6094       // Do we have the exact type of dest?
6095       bool could_have_dest = dest_spec;
6096       ciKlass* src_k = nullptr;
6097       ciKlass* dest_k = nullptr;
6098       if (!src_spec) {
6099         src_k = src_type->speculative_type_not_null();
6100         if (src_k != nullptr && src_k->is_array_klass()) {
6101           could_have_src = true;
6102         }
6103       }
6104       if (!dest_spec) {
6105         dest_k = dest_type->speculative_type_not_null();
6106         if (dest_k != nullptr && dest_k->is_array_klass()) {
6107           could_have_dest = true;
6108         }
6109       }
6110       if (could_have_src && could_have_dest) {
6111         // If we can have both exact types, emit the missing guards
6112         if (could_have_src && !src_spec) {
6113           src = maybe_cast_profiled_obj(src, src_k, true);
6114           src_type = _gvn.type(src);
6115           top_src = src_type->isa_aryptr();
6116         }
6117         if (could_have_dest && !dest_spec) {
6118           dest = maybe_cast_profiled_obj(dest, dest_k, true);
6119           dest_type = _gvn.type(dest);
6120           top_dest = dest_type->isa_aryptr();
6121         }
6122       }
6123     }
6124   }
6125 
6126   ciMethod* trap_method = method();
6127   int trap_bci = bci();
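       // With a tightly coupled allocation whose state was saved above, the uncommon traps emitted
       // below re-execute the allocation, so traps are counted against the allocation's method and bci
       // instead of the arraycopy's.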
6128   if (saved_jvms_before_guards != nullptr) {
6129     trap_method = alloc->jvms()->method();
6130     trap_bci = alloc->jvms()->bci();
6131   }
6132 
6133   bool negative_length_guard_generated = false;
6134 
6135   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
6136       can_emit_guards && !src->is_top() && !dest->is_top()) {

6137     // Validate arguments: this enables transformation of the ArrayCopyNode.
6138     validated = true;
6139 
6140     RegionNode* slow_region = new RegionNode(1);
6141     record_for_igvn(slow_region);
6142 
6143     // (1) src and dest are arrays.
6144     generate_non_array_guard(load_object_klass(src), slow_region);
6145     generate_non_array_guard(load_object_klass(dest), slow_region);
6146 
6147     // (2) src and dest arrays must have elements of the same BasicType
6148     // done at macro expansion or at Ideal transformation time
6149 
6150     // (4) src_offset must not be negative.
6151     generate_negative_guard(src_offset, slow_region);
6152 
6153     // (5) dest_offset must not be negative.
6154     generate_negative_guard(dest_offset, slow_region);
6155 
6156     // (7) src_offset + length must not exceed length of src.

6159                          slow_region);
6160 
6161     // (8) dest_offset + length must not exceed length of dest.
6162     generate_limit_guard(dest_offset, length,
6163                          load_array_length(dest),
6164                          slow_region);
6165 
6166     // (6) length must not be negative.
6167     // This is checked again in generate_arraycopy() during macro expansion, but
6168     // it must also be checked here for the case where the ArrayCopyNode is
6169     // eliminated by Escape Analysis.
6170     if (EliminateAllocations) {
6171       generate_negative_guard(length, slow_region);
6172       negative_length_guard_generated = true;
6173     }
6174 
6175     // (9) each element of an oop array must be assignable
6176     Node* dest_klass = load_object_klass(dest);
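         // When src == dest the copy stays within one array, so no element subtype check is needed.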
6177     if (src != dest) {
6178       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
6179       slow_region->add_req(not_subtype_ctrl);
6180     }
6181 
6182     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
6183     const Type* toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
6184     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
6185     src_type = _gvn.type(src);
6186     top_src  = src_type->isa_aryptr();
6187 
6188     // Handle flat inline type arrays (null-free arrays are handled by the subtype check above)
6189     if (!stopped() && UseFlatArray) {
6190       // If dest is flat, src must be flat as well (guaranteed by src <: dest check). Handle flat src here.
6191       assert(top_dest == nullptr || !top_dest->is_flat() || top_src->is_flat(), "src array must be flat");
6192       if (top_src != nullptr && top_src->is_flat()) {
6193         // Src is flat, check that dest is flat as well
6194         if (top_dest != nullptr && !top_dest->is_flat()) {
6195           generate_fair_guard(flat_array_test(dest_klass, /* flat = */ false), slow_region);
6196           // Since dest is flat and src <: dest, dest must have the same type as src.
6197           top_dest = top_src->cast_to_exactness(false);
6198           assert(top_dest->is_flat(), "dest must be flat");
6199           dest = _gvn.transform(new CheckCastPPNode(control(), dest, top_dest));
6200         }
6201       } else if (top_src == nullptr || !top_src->is_not_flat()) {
6202         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
6203         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
6204         assert(top_dest == nullptr || !top_dest->is_flat(), "dest array must not be flat");
6205         generate_fair_guard(flat_array_test(src), slow_region);
6206         if (top_src != nullptr) {
6207           top_src = top_src->cast_to_not_flat();
6208           src = _gvn.transform(new CheckCastPPNode(control(), src, top_src));
6209         }
6210       }
6211     }
6212 
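         // All failed guards branch to slow_region; trap there so execution re-enters the interpreter
         // instead of running the intrinsic.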
6213     {
6214       PreserveJVMState pjvms(this);
6215       set_control(_gvn.transform(slow_region));
6216       uncommon_trap(Deoptimization::Reason_intrinsic,
6217                     Deoptimization::Action_make_not_entrant);
6218       assert(stopped(), "Should be stopped");
6219     }




6220     arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
6221   }
6222 
6223   if (stopped()) {
6224     return true;
6225   }
6226 
6227   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6228                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
6229                                           // so the compiler has a chance to eliminate them: during macro expansion,
6230                                           // we have to set their control (CastPP nodes are eliminated).
6231                                           load_object_klass(src), load_object_klass(dest),
6232                                           load_array_length(src), load_array_length(dest));
6233 
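       // Record whether the guards above fully validated the arguments; this is what allows the
       // ArrayCopyNode to be transformed later (see 'Validate arguments' above).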
6234   ac->set_arraycopy(validated);
6235 
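       // _gvn.transform() may fold the ArrayCopyNode; if it survives (n == ac), wire its output
       // projections into the current state.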
6236   Node* n = _gvn.transform(ac);
6237   if (n == ac) {
6238     ac->connect_outputs(this);
6239   } else {
< prev index next >