src/hotspot/share/opto/library_call.cpp


   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "ci/ciUtilities.inline.hpp"
  28 #include "classfile/vmIntrinsics.hpp"
  29 #include "compiler/compileBroker.hpp"
  30 #include "compiler/compileLog.hpp"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "jfr/support/jfrIntrinsics.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/klass.inline.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/c2compiler.hpp"
  39 #include "opto/castnode.hpp"
  40 #include "opto/cfgnode.hpp"
  41 #include "opto/convertnode.hpp"
  42 #include "opto/countbitsnode.hpp"
  43 #include "opto/idealKit.hpp"
  44 #include "opto/library_call.hpp"
  45 #include "opto/mathexactnode.hpp"
  46 #include "opto/mulnode.hpp"

 308   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 309   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 310   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 311   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 312   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 313 
 314   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 315 
 316   case vmIntrinsics::_vectorizedHashCode:       return inline_vectorizedHashCode();
 317 
 318   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 319   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 320   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 321   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 322 
 323   case vmIntrinsics::_compressStringC:
 324   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 325   case vmIntrinsics::_inflateStringC:
 326   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 327 
 328   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 329   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 330   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 331   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 332   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 333   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 334   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 335   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 336   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);
 337 
 338   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 339   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 340   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 341   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 342   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 343   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 344   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 345   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 346   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);
 347 
 348   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 349   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 350   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 351   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 352   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 353   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 354   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 355   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 356   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 357 
 358   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 359   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 360   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 361   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 362   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 363   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 364   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 365   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 366   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 493                                                                                          "notifyJvmtiEnd", false, true);
 494   case vmIntrinsics::_notifyJvmtiVThreadMount:   return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
 495                                                                                          "notifyJvmtiMount", false, false);
 496   case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
 497                                                                                          "notifyJvmtiUnmount", false, false);
 498   case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
 499 #endif
 500 
 501 #ifdef JFR_HAVE_INTRINSICS
 502   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
 503   case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
 504   case vmIntrinsics::_jvm_commit:               return inline_native_jvm_commit();
 505 #endif
 506   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 507   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 508   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 509   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 510   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 511   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 512   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 513   case vmIntrinsics::_setMemory:                return inline_unsafe_setMemory();
 514   case vmIntrinsics::_getLength:                return inline_native_getLength();
 515   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 516   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 517   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 518   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 519   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 520   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 521   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 522 
 523   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 524   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 525 
 526   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 527 
 528   case vmIntrinsics::_isInstance:
 529   case vmIntrinsics::_getModifiers:
 530   case vmIntrinsics::_isInterface:
 531   case vmIntrinsics::_isArray:
 532   case vmIntrinsics::_isPrimitive:
 533   case vmIntrinsics::_isHidden:
 534   case vmIntrinsics::_getSuperclass:
 535   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 536 
 537   case vmIntrinsics::_floatToRawIntBits:
 538   case vmIntrinsics::_floatToIntBits:
 539   case vmIntrinsics::_intBitsToFloat:
 540   case vmIntrinsics::_doubleToRawLongBits:
 541   case vmIntrinsics::_doubleToLongBits:
 542   case vmIntrinsics::_longBitsToDouble:
 543   case vmIntrinsics::_floatToFloat16:
 544   case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());

2235     case vmIntrinsics::_remainderUnsigned_l: {
2236       zero_check_long(argument(2));
2237       // Compile-time detection of null-exception
2238       if (stopped()) {
2239         return true; // keep the graph constructed so far
2240       }
2241       n = new UModLNode(control(), argument(0), argument(2));
2242       break;
2243     }
2244     default:  fatal_unexpected_iid(id);  break;
2245   }
2246   set_result(_gvn.transform(n));
2247   return true;
2248 }
2249 
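
Editorial note: UModLNode implements the unsigned 64-bit remainder of Long.remainderUnsigned. A minimal standalone C++ sketch of the expected semantics (illustrative only, not HotSpot code):

#include <cassert>
#include <cstdint>

// Unsigned 64-bit remainder, matching Long.remainderUnsigned(long, long).
// The zero_check_long() above guarantees a non-zero divisor on this path.
uint64_t remainder_unsigned_l(uint64_t dividend, uint64_t divisor) {
  assert(divisor != 0 && "divisor must be zero-checked first");
  return dividend % divisor;
}

int main() {
  // -1L reinterpreted as unsigned is 2^64 - 1, whose remainder by 10 is 5.
  assert(remainder_unsigned_l(UINT64_C(0xFFFFFFFFFFFFFFFF), 10) == 5);
  return 0;
}
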
2250 //----------------------------inline_unsafe_access----------------------------
2251 
2252 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2253   // Attempt to infer a sharper value type from the offset and base type.
2254   ciKlass* sharpened_klass = nullptr;
2255 
2256   // See if it is an instance field, with an object type.
2257   if (alias_type->field() != nullptr) {
2258     if (alias_type->field()->type()->is_klass()) {
2259       sharpened_klass = alias_type->field()->type()->as_klass();
2260     }
2261   }
2262 
2263   const TypeOopPtr* result = nullptr;
2264   // See if it is a narrow oop array.
2265   if (adr_type->isa_aryptr()) {
2266     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2267       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2268       if (elem_type != nullptr && elem_type->is_loaded()) {
2269         // Sharpen the value type.
2270         result = elem_type;
2271       }
2272     }
2273   }
2274 
2275   // The sharpened class might be unloaded if there is no class loader
2276   // constraint in place.
2277   if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2278     // Sharpen the value type.
2279     result = TypeOopPtr::make_from_klass(sharpened_klass);
2280   }
2281   if (result != nullptr) {
2282 #ifndef PRODUCT
2283     if (C->print_intrinsics() || C->print_inlining()) {
2284       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2285       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2286     }
2287 #endif
2288   }
2289   return result;
2290 }
2291 
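
Editorial note: to summarize the control flow above, sharpening prefers the array element type and falls back to the declared instance-field type only when that klass is loaded. A hedged toy model (std::string stands in for ciKlass/TypeOopPtr; not HotSpot API):

#include <optional>
#include <string>

// Toy model of sharpen_unsafe_type(): an access can be sharpened either by
// the element type of a narrow oop array or by a loaded instance-field type.
struct AccessInfo {
  std::optional<std::string> array_elem;    // loaded element type, if an array
  std::optional<std::string> field_klass;   // declared instance-field type
  bool field_klass_loaded = false;
};

std::optional<std::string> sharpen(const AccessInfo& a) {
  if (a.array_elem) {
    return a.array_elem;                    // array element type wins
  }
  if (a.field_klass && a.field_klass_loaded) {
    return a.field_klass;                   // only if no class-loader issue
  }
  return std::nullopt;                      // no sharpening possible
}
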
2292 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2293   switch (kind) {
2294       case Relaxed:
2295         return MO_UNORDERED;
2296       case Opaque:
2297         return MO_RELAXED;
2298       case Acquire:
2299         return MO_ACQUIRE;
2300       case Release:
2301         return MO_RELEASE;
2302       case Volatile:
2303         return MO_SEQ_CST;
2304       default:
2305         ShouldNotReachHere();
2306         return 0;
2307   }
2308 }
2309 
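
Editorial note: the mapping above is the bridge from Unsafe/VarHandle access kinds to barrier-set memory-ordering decorators. A hedged standalone sketch with toy bit values (the names and values below are illustrative, not HotSpot's DecoratorSet):

#include <cstdint>

enum class AccessKind { Relaxed, Opaque, Acquire, Release, Volatile };

// Toy stand-ins for MO_UNORDERED .. MO_SEQ_CST (values illustrative).
constexpr uint32_t kMoUnordered = 1u << 0;
constexpr uint32_t kMoRelaxed   = 1u << 1;
constexpr uint32_t kMoAcquire   = 1u << 2;
constexpr uint32_t kMoRelease   = 1u << 3;
constexpr uint32_t kMoSeqCst    = 1u << 4;

// Same shape as mo_decorator_for_access_kind(): plain accesses are
// unordered, "opaque" maps to relaxed atomicity, volatile is seq-cst.
constexpr uint32_t mo_decorator(AccessKind k) {
  switch (k) {
    case AccessKind::Relaxed:  return kMoUnordered;
    case AccessKind::Opaque:   return kMoRelaxed;
    case AccessKind::Acquire:  return kMoAcquire;
    case AccessKind::Release:  return kMoRelease;
    case AccessKind::Volatile: return kMoSeqCst;
  }
  return 0;  // unreachable; mirrors ShouldNotReachHere()
}
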
2310 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2311   if (callee()->is_static())  return false;  // caller must have the capability!
2312   DecoratorSet decorators = C2_UNSAFE_ACCESS;
2313   guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2314   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2315   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2316 
2317   if (is_reference_type(type)) {
2318     decorators |= ON_UNKNOWN_OOP_REF;
2319   }
2320 
2321   if (unaligned) {
2322     decorators |= C2_UNALIGNED;
2323   }
2324 
2325 #ifndef PRODUCT
2326   {
2327     ResourceMark rm;
2328     // Check the signatures.
2329     ciSignature* sig = callee()->signature();
2330 #ifdef ASSERT
2331     if (!is_store) {
2332       // Object getReference(Object base, int/long offset), etc.
2333       BasicType rtype = sig->return_type()->basic_type();
2334       assert(rtype == type, "getter must return the expected value");
2335       assert(sig->count() == 2, "oop getter has 2 arguments");
2336       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2337       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2338     } else {
2339       // void putReference(Object base, int/long offset, Object x), etc.
2340       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2341       assert(sig->count() == 3, "oop putter has 3 arguments");
2342       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2343       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2344       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2345       assert(vtype == type, "putter must accept the expected value");
2346     }
2347 #endif // ASSERT
2348  }
2349 #endif //PRODUCT
2350 
2351   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2352 
2353   Node* receiver = argument(0);  // type: oop
2354 
2355   // Build address expression.
2356   Node* heap_base_oop = top();
2357 
2358   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2359   Node* base = argument(1);  // type: oop
2360   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2361   Node* offset = argument(2);  // type: long
2362   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2363   // to be plain byte offsets, which are also the same as those accepted
2364   // by oopDesc::field_addr.
2365   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2366          "fieldOffset must be byte-scaled");
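
Editorial note: the assertion above pins down that Unsafe field-offset cookies are plain byte offsets that can be added directly to the object base, as oopDesc::field_addr does. The C++ analogue, as a hedged standalone sketch (toy struct, not a real oop layout):

#include <cstddef>
#include <cstdint>
#include <cstring>

struct Toy { int32_t a; int64_t b; };

// A byte-scaled "field offset" is applied with plain pointer arithmetic on
// the object's base address; no extra scaling or decoding step is needed.
int64_t read_b(const Toy* obj) {
  const char* base = reinterpret_cast<const char*>(obj);
  int64_t v;
  std::memcpy(&v, base + offsetof(Toy, b), sizeof v);
  return v;
}
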
2367   // 32-bit machines ignore the high half!
2368   offset = ConvL2X(offset);
2369 
2370   // Save state and restore on bailout
2371   uint old_sp = sp();
2372   SafePointNode* old_map = clone_map();
2373 
2374   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2375   assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2376 
2377   if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2378     if (type != T_OBJECT) {
2379       decorators |= IN_NATIVE; // off-heap primitive access
2380     } else {
2381       set_map(old_map);
2382       set_sp(old_sp);
2383       return false; // off-heap oop accesses are not supported
2384     }
2385   } else {
2386     heap_base_oop = base; // on-heap or mixed access
2387   }
2388 
2389   // Can base be null? Otherwise, always on-heap access.
2390   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2391 
2392   if (!can_access_non_heap) {
2393     decorators |= IN_HEAP;
2394   }
2395 
2396   Node* val = is_store ? argument(4) : nullptr;
2397 
2398   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2399   if (adr_type == TypePtr::NULL_PTR) {
2400     set_map(old_map);
2401     set_sp(old_sp);
2402     return false; // off-heap access with zero address
2403   }
2404 
2405   // Try to categorize the address.
2406   Compile::AliasType* alias_type = C->alias_type(adr_type);
2407   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2408 
2409   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2410       alias_type->adr_type() == TypeAryPtr::RANGE) {
2411     set_map(old_map);
2412     set_sp(old_sp);
2413     return false; // not supported
2414   }
2415 
2416   bool mismatched = false;
2417   BasicType bt = alias_type->basic_type();
2418   if (bt != T_ILLEGAL) {
2419     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2420     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2421       // Alias type doesn't differentiate between byte[] and boolean[].
2422       // Use address type to get the element type.
2423       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2424     }
2425     if (is_reference_type(bt, true)) {
2426       // accessing an array field with getReference is not a mismatch
2427       bt = T_OBJECT;
2428     }
2429     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2430       // Don't intrinsify mismatched object accesses
2431       set_map(old_map);
2432       set_sp(old_sp);
2433       return false;
2434     }
2435     mismatched = (bt != type);
2436   } else if (alias_type->adr_type()->isa_oopptr()) {
2437     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2438   }
2439 
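
Editorial note: a hedged scalar rendering of the classification just above. An access is mismatched when the declared type and the accessed type disagree, and an object/primitive disagreement disqualifies the intrinsic entirely (toy enum, not HotSpot's BasicType):

#include <optional>

enum class BT { Byte, Boolean, Int, Long, Object, Illegal };

// Returns std::nullopt when the intrinsic must bail out, otherwise whether
// the access should carry the C2_MISMATCHED decorator.
std::optional<bool> classify(BT declared, BT accessed, bool on_heap) {
  if (declared == BT::Illegal) {
    // Unknown layout: conservatively mismatched for "wide" on-heap accesses.
    return on_heap;
  }
  if ((declared == BT::Object) != (accessed == BT::Object)) {
    return std::nullopt;  // mismatched object access: don't intrinsify
  }
  return declared != accessed;
}
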
2440   destruct_map_clone(old_map);
2441   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2442 
2443   if (mismatched) {
2444     decorators |= C2_MISMATCHED;
2445   }
2446 
2447   // First guess at the value type.
2448   const Type *value_type = Type::get_const_basic_type(type);
2449 
2450   // Figure out the memory ordering.
2451   decorators |= mo_decorator_for_access_kind(kind);
2452 
2453   if (!is_store && type == T_OBJECT) {
2454     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2455     if (tjp != nullptr) {
2456       value_type = tjp;
2457     }
2458   }
2459 
2460   receiver = null_check(receiver);
2461   if (stopped()) {
2462     return true;
2463   }
2464   // Heap pointers get a null-check from the interpreter,
2465   // as a courtesy.  However, this is not guaranteed by Unsafe,
2466   // and it is not possible to fully distinguish unintended nulls
2467   // from intended ones in this API.
2468 
2469   if (!is_store) {
2470     Node* p = nullptr;
2471     // Try to constant fold a load from a constant field
2472     ciField* field = alias_type->field();
2473     if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) {
2474       // final or stable field
2475       p = make_constant_from_field(field, heap_base_oop);
2476     }
2477 
2478     if (p == nullptr) { // Could not constant fold the load
2479       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2480       // Normalize the value returned by getBoolean in the following cases
2481       if (type == T_BOOLEAN &&
2482           (mismatched ||
2483            heap_base_oop == top() ||                  // - heap_base_oop is null or
2484            (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2485                                                       //   and the unsafe access is made at a large offset
2486                                                       //   (i.e., larger than the maximum offset necessary for any
2487                                                       //   field access)
2488             ) {
2489           IdealKit ideal = IdealKit(this);
2490 #define __ ideal.
2491           IdealVariable normalized_result(ideal);
2492           __ declarations_done();
2493           __ set(normalized_result, p);
2494           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2495           __ set(normalized_result, ideal.ConI(1));
2496           ideal.end_if();
2497           final_sync(ideal);
2498           p = __ value(normalized_result);
2499 #undef __
2500       }
2501     }
2502     if (type == T_ADDRESS) {
2503       p = gvn().transform(new CastP2XNode(nullptr, p));
2504       p = ConvX2UL(p);
2505     }
2506     // The load node has the control of the preceding MemBarCPUOrder.  All
2507     // following nodes will have the control of the MemBarCPUOrder inserted at
2508     // the end of this method.  So, pushing the load onto the stack at a later
2509     // point is fine.
2510     set_result(p);
2511   } else {
2512     if (bt == T_ADDRESS) {
2513       // Repackage the long as a pointer.
2514       val = ConvL2X(val);
2515       val = gvn().transform(new CastX2PNode(val));
2516     }
2517     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2518   }
2519 
2520   return true;
2521 }
2522 
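
Editorial note: the IdealKit fragment above canonicalizes getBoolean results when the load may observe an arbitrary byte (mismatched or potentially off-heap access). A hedged scalar equivalent:

#include <cstdint>

// Scalar equivalent of the IdealKit graph: result = (p != 0) ? 1 : 0, so a
// boolean read through Unsafe never exposes raw byte values like 0x02.
int32_t normalize_boolean(int32_t p) {
  return (p != 0) ? 1 : 0;
}
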
2523 //----------------------------inline_unsafe_load_store----------------------------
2524 // This method serves a couple of different customers (depending on LoadStoreKind):
2525 //
2526 // LS_cmp_swap:
2527 //
2528 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2529 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2530 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2531 //
2532 // LS_cmp_swap_weak:
2533 //
2534 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2535 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2536 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2537 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2538 //
2539 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);
2540 //   boolean weakCompareAndSetIntPlain(     Object o, long offset, int    expected, int    x);
2541 //   boolean weakCompareAndSetIntAcquire(   Object o, long offset, int    expected, int    x);
2542 //   boolean weakCompareAndSetIntRelease(   Object o, long offset, int    expected, int    x);

2708     }
2709     case LS_cmp_swap:
2710     case LS_cmp_swap_weak:
2711     case LS_get_add:
2712       break;
2713     default:
2714       ShouldNotReachHere();
2715   }
2716 
2717   // Null check receiver.
2718   receiver = null_check(receiver);
2719   if (stopped()) {
2720     return true;
2721   }
2722 
2723   int alias_idx = C->get_alias_index(adr_type);
2724 
2725   if (is_reference_type(type)) {
2726     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2727 
2728     // Transformation of a value which could be null pointer (CastPP #null)
2729     // could be delayed during Parse (for example, in adjust_map_after_if()).
2730     // Execute transformation here to avoid barrier generation in such case.
2731     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2732       newval = _gvn.makecon(TypePtr::NULL_PTR);
2733 
2734     if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2735       // Refine the value to a null constant, when it is known to be null
2736       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2737     }
2738   }
2739 
2740   Node* result = nullptr;
2741   switch (kind) {
2742     case LS_cmp_exchange: {
2743       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2744                                             oldval, newval, value_type, type, decorators);
2745       break;
2746     }
2747     case LS_cmp_swap_weak:

2894                     Deoptimization::Action_make_not_entrant);
2895     }
2896     if (stopped()) {
2897       return true;
2898     }
2899 #endif //INCLUDE_JVMTI
2900 
2901   Node* test = nullptr;
2902   if (LibraryCallKit::klass_needs_init_guard(kls)) {
2903     // Note:  The argument might still be an illegal value like
2904     // Serializable.class or Object[].class.   The runtime will handle it.
2905     // But we must make an explicit check for initialization.
2906     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2907     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2908     // can generate code to load it as unsigned byte.
2909     Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
2910     Node* bits = intcon(InstanceKlass::fully_initialized);
2911     test = _gvn.transform(new SubINode(inst, bits));
2912     // The 'test' is non-zero if we need to take a slow path.
2913   }
2914 
2915   Node* obj = new_instance(kls, test);
2916   set_result(obj);
2917   return true;
2918 }
2919 
2920 //------------------------inline_native_time_funcs--------------
2921 // inline code for System.currentTimeMillis() and System.nanoTime()
2922 // these have the same type and signature
2923 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2924   const TypeFunc* tf = OptoRuntime::void_long_Type();
2925   const TypePtr* no_memory_effects = nullptr;
2926   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2927   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2928 #ifdef ASSERT
2929   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2930   assert(value_top == top(), "second value must be top");
2931 #endif
2932   set_result(value);
2933   return true;
2934 }
2935 

3677   Node* thread = _gvn.transform(new ThreadLocalNode());
3678   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3679   Node* thread_obj_handle
3680     = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3681   thread_obj_handle = _gvn.transform(thread_obj_handle);
3682   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3683   access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3684 
3685   // Change the lock_id of the JavaThread
3686   Node* tid = load_field_from_object(arr, "tid", "J");
3687   Node* thread_id_offset = basic_plus_adr(thread, in_bytes(JavaThread::lock_id_offset()));
3688   Node* tid_memory = store_to_memory(control(), thread_id_offset, tid, T_LONG, MemNode::unordered, true);
3689 
3690   JFR_ONLY(extend_setCurrentThread(thread, arr);)
3691   return true;
3692 }
3693 
3694 const Type* LibraryCallKit::scopedValueCache_type() {
3695   ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3696   const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3697   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3698 
3699   // Because we create the scopedValue cache lazily, we have to make the
3700   // type of the result BotPTR.
3701   bool xk = etype->klass_is_exact();
3702   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3703   return objects_type;
3704 }
3705 
3706 Node* LibraryCallKit::scopedValueCache_helper() {
3707   Node* thread = _gvn.transform(new ThreadLocalNode());
3708   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3709   // We cannot use immutable_memory() because we might flip onto a
3710   // different carrier thread, at which point we'll need to use that
3711   // carrier thread's cache.
3712   // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3713   //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3714   return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3715 }
3716 
3717 //------------------------inline_native_scopedValueCache------------------
3718 bool LibraryCallKit::inline_native_scopedValueCache() {
3719   Node* cache_obj_handle = scopedValueCache_helper();
3720   const Type* objects_type = scopedValueCache_type();
3721   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3722 

3805   }
3806 
3807   // Result of top level CFG and Memory.
3808   RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
3809   record_for_igvn(result_rgn);
3810   PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
3811   record_for_igvn(result_mem);
3812 
3813   result_rgn->init_req(_true_path, _gvn.transform(valid_pin_count));
3814   result_rgn->init_req(_false_path, _gvn.transform(continuation_is_null));
3815   result_mem->init_req(_true_path, _gvn.transform(updated_pin_count_memory));
3816   result_mem->init_req(_false_path, _gvn.transform(input_memory_state));
3817 
3818   // Set output state.
3819   set_control(_gvn.transform(result_rgn));
3820   set_all_memory(_gvn.transform(result_mem));
3821 
3822   return true;
3823 }
3824 
3825 //---------------------------load_mirror_from_klass----------------------------
3826 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3827 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3828   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3829   Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3830   // mirror = ((OopHandle)mirror)->resolve();
3831   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3832 }
3833 
3834 //-----------------------load_klass_from_mirror_common-------------------------
3835 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3836 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3837 // and branch to the given path on the region.
3838 // If never_see_null, take an uncommon trap on null, so we can optimistically
3839 // compile for the non-null case.
3840 // If the region is null, force never_see_null = true.
3841 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3842                                                     bool never_see_null,
3843                                                     RegionNode* region,
3844                                                     int null_path,
3845                                                     int offset) {
3846   if (region == nullptr)  never_see_null = true;
3847   Node* p = basic_plus_adr(mirror, offset);
3848   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3849   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3850   Node* null_ctl = top();
3851   kls = null_check_oop(kls, &null_ctl, never_see_null);
3852   if (region != nullptr) {
3853     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

3857   }
3858   return kls;
3859 }
3860 
3861 //--------------------(inline_native_Class_query helpers)---------------------
3862 // Use this for JVM_ACC_INTERFACE.
3863 // Fall through if (mods & mask) == bits, take the guard otherwise.
3864 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
3865                                                  ByteSize offset, const Type* type, BasicType bt) {
3866   // Branch around if the given klass has the given modifier bit set.
3867   // Like generate_guard, adds a new path onto the region.
3868   Node* modp = basic_plus_adr(kls, in_bytes(offset));
3869   Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
3870   Node* mask = intcon(modifier_mask);
3871   Node* bits = intcon(modifier_bits);
3872   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3873   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3874   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3875   return generate_fair_guard(bol, region);
3876 }
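
Editorial note: a hedged scalar form of the guard predicate above: fall through when (mods & mask) == bits, branch to the guarded region otherwise. For example, the interface guard below uses mask = JVM_ACC_INTERFACE and bits = 0, so only non-interface klasses stay on the fast path:

#include <cstdint>

// True means "take the guard" (slow region); false means fall through.
// Matches the BoolTest::ne test of (mods & modifier_mask) vs. modifier_bits.
bool klass_flags_guard_taken(uint32_t mods, uint32_t mask, uint32_t bits) {
  return (mods & mask) != bits;
}
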
3877 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3878   return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
3879                                     Klass::access_flags_offset(), TypeInt::INT, T_INT);
3880 }
3881 
3882 // Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
3883 Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3884   return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
3885                                     Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
3886 }
3887 
3888 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3889   return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
3890 }
3891 
3892 //-------------------------inline_native_Class_query-------------------
3893 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3894   const Type* return_type = TypeInt::BOOL;
3895   Node* prim_return_value = top();  // what happens if it's a primitive class?
3896   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);

4058 
4059   case vmIntrinsics::_getClassAccessFlags:
4060     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
4061     query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
4062     break;
4063 
4064   default:
4065     fatal_unexpected_iid(id);
4066     break;
4067   }
4068 
4069   // Fall-through is the normal case of a query to a real class.
4070   phi->init_req(1, query_value);
4071   region->init_req(1, control());
4072 
4073   C->set_has_split_ifs(true); // Has chance for split-if optimization
4074   set_result(region, phi);
4075   return true;
4076 }
4077 
4078 //-------------------------inline_Class_cast-------------------
4079 bool LibraryCallKit::inline_Class_cast() {
4080   Node* mirror = argument(0); // Class
4081   Node* obj    = argument(1);
4082   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4083   if (mirror_con == nullptr) {
4084     return false;  // dead path (mirror->is_top()).
4085   }
4086   if (obj == nullptr || obj->is_top()) {
4087     return false;  // dead path
4088   }
4089   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4090 
4091   // First, see if Class.cast() can be folded statically.
4092   // java_mirror_type() returns non-null for compile-time Class constants.
4093   ciType* tm = mirror_con->java_mirror_type();
4094   if (tm != nullptr && tm->is_klass() &&
4095       tp != nullptr) {
4096     if (!tp->is_loaded()) {
4097       // Don't use intrinsic when class is not loaded.
4098       return false;
4099     } else {
4100       int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());
4101       if (static_res == Compile::SSC_always_true) {
4102         // isInstance() is true - fold the code.
4103         set_result(obj);
4104         return true;
4105       } else if (static_res == Compile::SSC_always_false) {
4106         // Don't use intrinsic, have to throw ClassCastException.
4107         // If the reference is null, the non-intrinsic bytecode will
4108         // be optimized appropriately.
4109         return false;
4110       }
4111     }
4112   }
4113 
4114   // Bailout intrinsic and do normal inlining if exception path is frequent.
4115   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4116     return false;
4117   }
4118 
4119   // Generate dynamic checks.
4120   // Class.cast() is java implementation of _checkcast bytecode.
4121   // Do checkcast (Parse::do_checkcast()) optimizations here.
4122 
4123   mirror = null_check(mirror);
4124   // If mirror is dead, only null-path is taken.
4125   if (stopped()) {
4126     return true;
4127   }
4128 
4129   // Not-subtype or the mirror's klass ptr is null (in case it is a primitive).
4130   enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
4131   RegionNode* region = new RegionNode(PATH_LIMIT);
4132   record_for_igvn(region);
4133 
4134   // Now load the mirror's klass metaobject, and null-check it.
4135   // If kls is null, we have a primitive mirror and
4136   // nothing is an instance of a primitive type.
4137   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4138 
4139   Node* res = top();
4140   if (!stopped()) {
4141     Node* bad_type_ctrl = top();
4142     // Do checkcast optimizations.
4143     res = gen_checkcast(obj, kls, &bad_type_ctrl);
4144     region->init_req(_bad_type_path, bad_type_ctrl);
4145   }
4146   if (region->in(_prim_path) != top() ||
4147       region->in(_bad_type_path) != top()) {
4148     // Let Interpreter throw ClassCastException.
4149     PreserveJVMState pjvms(this);
4150     set_control(_gvn.transform(region));
4151     uncommon_trap(Deoptimization::Reason_intrinsic,
4152                   Deoptimization::Action_maybe_recompile);
4153   }
4154   if (!stopped()) {
4155     set_result(res);
4156   }
4157   return true;
4158 }
4159 
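
Editorial note: the static fold in inline_Class_cast() above has three outcomes. A hedged sketch of the decision (toy enumerators standing in for Compile::SSC_*):

enum class StaticSubtype { AlwaysTrue, AlwaysFalse, EasyTest };
enum class CastPlan { FoldToIdentity, UseBytecode, EmitDynamicCheck };

// Mirrors inline_Class_cast(): a statically-true check folds to the object
// itself, a statically-false one defers to the bytecode (which throws
// ClassCastException), anything else needs the dynamic checkcast path.
CastPlan plan_class_cast(StaticSubtype r) {
  switch (r) {
    case StaticSubtype::AlwaysTrue:  return CastPlan::FoldToIdentity;
    case StaticSubtype::AlwaysFalse: return CastPlan::UseBytecode;
    default:                         return CastPlan::EmitDynamicCheck;
  }
}
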
4160 
4161 //--------------------------inline_native_subtype_check------------------------
4162 // This intrinsic takes the JNI calls out of the heart of
4163 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4164 bool LibraryCallKit::inline_native_subtype_check() {
4165   // Pull both arguments off the stack.
4166   Node* args[2];                // two java.lang.Class mirrors: superc, subc
4167   args[0] = argument(0);
4168   args[1] = argument(1);
4169   Node* klasses[2];             // corresponding Klasses: superk, subk
4170   klasses[0] = klasses[1] = top();
4171 
4172   enum {
4173     // A full decision tree on {superc is prim, subc is prim}:
4174     _prim_0_path = 1,           // {P,N} => false
4175                                 // {P,P} & superc!=subc => false
4176     _prim_same_path,            // {P,P} & superc==subc => true
4177     _prim_1_path,               // {N,P} => false
4178     _ref_subtype_path,          // {N,N} & subtype check wins => true
4179     _both_ref_path,             // {N,N} & subtype check loses => false
4180     PATH_LIMIT
4181   };
4182 
4183   RegionNode* region = new RegionNode(PATH_LIMIT);
4184   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
4185   record_for_igvn(region);
4186 
4187   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
4188   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4189   int class_klass_offset = java_lang_Class::klass_offset();
4190 
4191   // First null-check both mirrors and load each mirror's klass metaobject.
4192   int which_arg;
4193   for (which_arg = 0; which_arg <= 1; which_arg++) {
4194     Node* arg = args[which_arg];
4195     arg = null_check(arg);
4196     if (stopped())  break;
4197     args[which_arg] = arg;
4198 
4199     Node* p = basic_plus_adr(arg, class_klass_offset);
4200     Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
4201     klasses[which_arg] = _gvn.transform(kls);
4202   }
4203 
4204   // Having loaded both klasses, test each for null.
4205   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4206   for (which_arg = 0; which_arg <= 1; which_arg++) {
4207     Node* kls = klasses[which_arg];
4208     Node* null_ctl = top();
4209     kls = null_check_oop(kls, &null_ctl, never_see_null);
4210     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
4211     region->init_req(prim_path, null_ctl);
4212     if (stopped())  break;
4213     klasses[which_arg] = kls;
4214   }
4215 
4216   if (!stopped()) {
4217     // now we have two reference types, in klasses[0..1]
4218     Node* subk   = klasses[1];  // the argument to isAssignableFrom
4219     Node* superk = klasses[0];  // the receiver
4220     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4221     // now we have a successful reference subtype check
4222     region->set_req(_ref_subtype_path, control());
4223   }
4224 
4225   // If both operands are primitive (both klasses null), then
4226   // we must return true when they are identical primitives.
4227   // It is convenient to test this after the first null klass check.
4228   set_control(region->in(_prim_0_path)); // go back to first null check
4229   if (!stopped()) {
4230     // Since superc is primitive, make a guard for the superc==subc case.
4231     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4232     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4233     generate_guard(bol_eq, region, PROB_FAIR);
4234     if (region->req() == PATH_LIMIT+1) {
4235       // A guard was added.  If the added guard is taken, superc==subc.
4236       region->swap_edges(PATH_LIMIT, _prim_same_path);
4237       region->del_req(PATH_LIMIT);
4238     }
4239     region->set_req(_prim_0_path, control()); // Not equal after all.
4240   }
4241 
4242   // these are the only paths that produce 'true':
4243   phi->set_req(_prim_same_path,   intcon(1));
4244   phi->set_req(_ref_subtype_path, intcon(1));
4245 
4246   // pull together the cases:
4247   assert(region->req() == PATH_LIMIT, "sane region");
4248   for (uint i = 1; i < region->req(); i++) {
4249     Node* ctl = region->in(i);
4250     if (ctl == nullptr || ctl == top()) {
4251       region->set_req(i, top());
4252       phi   ->set_req(i, top());
4253     } else if (phi->in(i) == nullptr) {
4254       phi->set_req(i, intcon(0)); // all other paths produce 'false'
4255     }
4256   }
4257 
4258   set_control(_gvn.transform(region));
4259   set_result(_gvn.transform(phi));
4260   return true;
4261 }
4262 
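
Editorial note: collapsing the five-path region above into a scalar truth table; primitive mirrors carry a null klass, so identity is their only "subtype" relation. A hedged sketch:

// Toy model of the {superc is prim, subc is prim} decision tree in
// inline_native_subtype_check().
bool is_assignable_from(bool superc_prim, bool subc_prim,
                        bool same_mirror, bool subtype_check_wins) {
  if (superc_prim || subc_prim) {
    return superc_prim && subc_prim && same_mirror;  // _prim_same_path
  }
  return subtype_check_wins;                         // _ref_subtype_path
}
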
4263 //---------------------generate_array_guard_common------------------------
4264 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
4265                                                   bool obj_array, bool not_array) {
4266 
4267   if (stopped()) {
4268     return nullptr;
4269   }
4270 
4271   // If obj_array/not_array==false/false:
4272   // Branch around if the given klass is in fact an array (either obj or prim).
4273   // If obj_array/not_array==false/true:
4274   // Branch around if the given klass is not an array klass of any kind.
4275   // If obj_array/not_array==true/true:
4276   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
4277   // If obj_array/not_array==true/false:
4278   // Branch around if the kls is an oop array (Object[] or subtype)
4279   //
4280   // Like generate_guard, adds a new path onto the region.
4281   jint  layout_con = 0;
4282   Node* layout_val = get_layout_helper(kls, layout_con);
4283   if (layout_val == nullptr) {
4284     bool query = (obj_array
4285                   ? Klass::layout_helper_is_objArray(layout_con)
4286                   : Klass::layout_helper_is_array(layout_con));
4287     if (query == not_array) {
4288       return nullptr;                       // never a branch
4289     } else {                             // always a branch
4290       Node* always_branch = control();
4291       if (region != nullptr)
4292         region->add_req(always_branch);
4293       set_control(top());
4294       return always_branch;
4295     }
4296   }
4297   // Now test the correct condition.
4298   jint  nval = (obj_array
4299                 ? (jint)(Klass::_lh_array_tag_type_value
4300                    <<    Klass::_lh_array_tag_shift)
4301                 : Klass::_lh_neutral_value);
4302   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4303   BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
4304   // invert the test if we are looking for a non-array
4305   if (not_array)  btest = BoolTest(btest).negate();
4306   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4307   return generate_fair_guard(bol, region);
4308 }
4309 
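
Editorial note: the single signed "lt" compare above works because array layout helpers are negative and tag-ordered: objArray-tagged values sort below the typeArray-tagged constant. A hedged sketch with illustrative tag values (see Klass for the real encoding):

#include <cstdint>

// Illustrative mirror of the layout-helper tag encoding: the top two bits
// hold the array tag, with objArray below typeArray in signed order.
constexpr int kTagShift = 30;
constexpr int32_t kTypeArrayTagged = static_cast<int32_t>(0x3u << kTagShift);
constexpr int32_t kNeutral = 0;  // stand-in for _lh_neutral_value

bool layout_is_array(int32_t lh)     { return lh < kNeutral; }
// Obj arrays sort below the typeArray-tagged constant, so one signed
// compare distinguishes them from type arrays.
bool layout_is_obj_array(int32_t lh) { return lh < kTypeArrayTagged; }
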
4310 
4311 //-----------------------inline_native_newArray--------------------------
4312 // private static native Object java.lang.reflect.newArray(Class<?> componentType, int length);
4313 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4314 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4315   Node* mirror;
4316   Node* count_val;
4317   if (uninitialized) {
4318     null_check_receiver();
4319     mirror    = argument(1);
4320     count_val = argument(2);
4321   } else {
4322     mirror    = argument(0);
4323     count_val = argument(1);
4324   }
4325 
4326   mirror = null_check(mirror);
4327   // If mirror or obj is dead, only null-path is taken.
4328   if (stopped())  return true;
4329 
4330   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4331   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4332   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);

4438   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4439   { PreserveReexecuteState preexecs(this);
4440     jvms()->set_should_reexecute(true);
4441 
4442     array_type_mirror = null_check(array_type_mirror);
4443     original          = null_check(original);
4444 
4445     // Check if a null path was taken unconditionally.
4446     if (stopped())  return true;
4447 
4448     Node* orig_length = load_array_length(original);
4449 
4450     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4451     klass_node = null_check(klass_node);
4452 
4453     RegionNode* bailout = new RegionNode(1);
4454     record_for_igvn(bailout);
4455 
4456     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4457     // Bail out if that is so.
4458     Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
4459     if (not_objArray != nullptr) {
4460       // Improve the klass node's type from the new optimistic assumption:
4461       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4462       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
4463       Node* cast = new CastPPNode(control(), klass_node, akls);
4464       klass_node = _gvn.transform(cast);
4465     }
4466 
4467     // Bail out if either start or end is negative.
4468     generate_negative_guard(start, bailout, &start);
4469     generate_negative_guard(end,   bailout, &end);
4470 
4471     Node* length = end;
4472     if (_gvn.type(start) != TypeInt::ZERO) {
4473       length = _gvn.transform(new SubINode(end, start));
4474     }
4475 
4476     // Bail out if length is negative (i.e., if start > end).
4477     // Without this, new_array would throw
4478     // NegativeArraySizeException, but IllegalArgumentException is what
4479     // should be thrown.
4480     generate_negative_guard(length, bailout, &length);
4481 
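
Editorial note: a hedged scalar rendering of the bailout guards in this block; each clause corresponds to one generate_negative_guard call, including the orig_tail check that follows just below:

#include <cstdint>

// True when copyOfRange(original, start, end) can stay on the fast path.
bool copy_of_range_in_bounds(int32_t start, int32_t end, int32_t orig_length) {
  if (start < 0 || end < 0) return false;  // negative start or end
  int32_t length = end - start;
  if (length < 0) return false;            // start > end: IllegalArgumentException
  int32_t orig_tail = orig_length - start; // elements available in the source
  return orig_tail >= 0;                   // start must not pass the source end
}
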
4482     // Bail out if start is larger than the original length
4483     Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4484     generate_negative_guard(orig_tail, bailout, &orig_tail);
4485 
4486     if (bailout->req() > 1) {
4487       PreserveJVMState pjvms(this);
4488       set_control(_gvn.transform(bailout));
4489       uncommon_trap(Deoptimization::Reason_intrinsic,
4490                     Deoptimization::Action_maybe_recompile);
4491     }
4492 
4493     if (!stopped()) {
4494       // How many elements will we copy from the original?
4495       // The answer is MinI(orig_tail, length).
4496       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4497 
4498       // Generate a direct call to the right arraycopy function(s).
4499       // We know the copy is disjoint but we might not know if the
4500       // oop stores need checking.
4501       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).

4507       // to the copyOf to be validated, including that the copy to the
4508       // new array won't trigger an ArrayStoreException. That subtype
4509       // check can be optimized if we know something on the type of
4510       // the input array from type speculation.
4511       if (_gvn.type(klass_node)->singleton()) {
4512         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4513         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4514 
4515         int test = C->static_subtype_check(superk, subk);
4516         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4517           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4518           if (t_original->speculative_type() != nullptr) {
4519             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4520           }
4521         }
4522       }
4523 
4524       bool validated = false;
4525       // Reason_class_check rather than Reason_intrinsic because we
4526       // want to intrinsify even if this traps.
4527       if (!too_many_traps(Deoptimization::Reason_class_check)) {
4528         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4529 
4530         if (not_subtype_ctrl != top()) {
4531           PreserveJVMState pjvms(this);
4532           set_control(not_subtype_ctrl);
4533           uncommon_trap(Deoptimization::Reason_class_check,
4534                         Deoptimization::Action_make_not_entrant);
4535           assert(stopped(), "Should be stopped");
4536         }
4537         validated = true;
4538       }
4539 
4540       if (!stopped()) {
4541         newcopy = new_array(klass_node, length, 0);  // no arguments to push
4542 
4543         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
4544                                                 load_object_klass(original), klass_node);
4545         if (!is_copyOfRange) {
4546           ac->set_copyof(validated);
4547         } else {

4593 
4594 //-----------------------generate_method_call----------------------------
4595 // Use generate_method_call to make a slow-call to the real
4596 // method if the fast path fails.  An alternative would be to
4597 // use a stub like OptoRuntime::slow_arraycopy_Java.
4598 // This only works for expanding the current library call,
4599 // not another intrinsic.  (E.g., don't use this for making an
4600 // arraycopy call inside of the copyOf intrinsic.)
4601 CallJavaNode*
4602 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4603   // When compiling the intrinsic method itself, do not use this technique.
4604   guarantee(callee() != C->method(), "cannot make slow-call to self");
4605 
4606   ciMethod* method = callee();
4607   // ensure the JVMS we have will be correct for this call
4608   guarantee(method_id == method->intrinsic_id(), "must match");
4609 
4610   const TypeFunc* tf = TypeFunc::make(method);
4611   if (res_not_null) {
4612     assert(tf->return_type() == T_OBJECT, "");
4613     const TypeTuple* range = tf->range();
4614     const Type** fields = TypeTuple::fields(range->cnt());
4615     fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4616     const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4617     tf = TypeFunc::make(tf->domain(), new_range);
4618   }
4619   CallJavaNode* slow_call;
4620   if (is_static) {
4621     assert(!is_virtual, "");
4622     slow_call = new CallStaticJavaNode(C, tf,
4623                            SharedRuntime::get_resolve_static_call_stub(), method);
4624   } else if (is_virtual) {
4625     assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4626     int vtable_index = Method::invalid_vtable_index;
4627     if (UseInlineCaches) {
4628       // Suppress the vtable call
4629     } else {
4630       // hashCode and clone are not miranda methods,
4631       // so the vtable index is fixed.
4632       // No need to use the linkResolver to get it.
4633        vtable_index = method->vtable_index();
4634        assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4635               "bad index %d", vtable_index);
4636     }
4637     slow_call = new CallDynamicJavaNode(tf,

4654   set_edges_for_java_call(slow_call);
4655   return slow_call;
4656 }
4657 
4658 
4659 /**
4660  * Build special case code for calls to hashCode on an object. This call may
4661  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4662  * slightly different code.
4663  */
4664 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4665   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4666   assert(!(is_virtual && is_static), "either virtual, special, or static");
4667 
4668   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4669 
4670   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4671   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4672   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4673   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4674   Node* obj = nullptr;
4675   if (!is_static) {
4676     // Check for hashing null object
4677     obj = null_check_receiver();
4678     if (stopped())  return true;        // unconditionally null
4679     result_reg->init_req(_null_path, top());
4680     result_val->init_req(_null_path, top());
4681   } else {
4682     // Do a null check, and return zero if null.
4683     // System.identityHashCode(null) == 0
4684     obj = argument(0);
4685     Node* null_ctl = top();
4686     obj = null_check_oop(obj, &null_ctl);
4687     result_reg->init_req(_null_path, null_ctl);
4688     result_val->init_req(_null_path, _gvn.intcon(0));
4689   }
4690 
4691   // Unconditionally null?  Then return right away.
4692   if (stopped()) {
4693     set_control( result_reg->in(_null_path));
4694     if (!stopped())
4695       set_result(result_val->in(_null_path));
4696     return true;
4697   }
4698 
4699   // We only go to the fast case code if we pass a number of guards.  The
4700   // paths which do not pass are accumulated in the slow_region.
4701   RegionNode* slow_region = new RegionNode(1);
4702   record_for_igvn(slow_region);
4703 
4704   // If this is a virtual call, we generate a funny guard.  We pull out
4705   // the vtable entry corresponding to hashCode() from the target object.
4706   // If the target method which we are calling happens to be the native
4707   // Object hashCode() method, we pass the guard.  We do not need this
4708   // guard for non-virtual calls -- the caller is known to be the native
4709   // Object hashCode().
4710   if (is_virtual) {
4711     // After null check, get the object's klass.
4712     Node* obj_klass = load_object_klass(obj);
4713     generate_virtual_guard(obj_klass, slow_region);
4714   }
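     // (A sketch of what generate_virtual_guard emits: load the receiver's
     //  vtable slot for hashCode() and compare it against the address of the
     //  native Object::hashCode; any overriding method branches to slow_region.)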
4715 
4716   // Get the header out of the object, use LoadMarkNode when available
4717   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4718   // The control of the load must be null. Otherwise, the load can move before
4719   // the null check after castPP removal.
4720   Node* no_ctrl = nullptr;
4721   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4722 
4723   if (!UseObjectMonitorTable) {
4724     // Test the header to see if it is safe to read w.r.t. locking.
4725     Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);
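       // (markWord lock bits, per markWord.hpp: 01 = unlocked, 10 = inflated
       //  monitor; lock_mask_in_place isolates exactly those two low bits.)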

4726     Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4727     if (LockingMode == LM_LIGHTWEIGHT) {
4728       Node *monitor_val   = _gvn.MakeConX(markWord::monitor_value);
4729       Node *chk_monitor   = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4730       Node *test_monitor  = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4731 
4732       generate_slow_guard(test_monitor, slow_region);
4733     } else {
4734       Node *unlocked_val      = _gvn.MakeConX(markWord::unlocked_value);
4735       Node *chk_unlocked      = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
4736       Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
4737 
4738       generate_slow_guard(test_not_unlocked, slow_region);
4739     }
4740   }
4741 
4742   // Get the hash value and check to see that it has been properly assigned.
4743   // We depend on hash_mask being at most 32 bits and avoid the use of
4744   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4745   // vm: see markWord.hpp.
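     // A sketch of the fast-path hash extraction this block guards, assuming
     // the constants from markWord.hpp:
     //   int hash = (int)(header >> markWord::hash_shift) & markWord::hash_mask;
     //   if (hash == markWord::no_hash)  branch to slow_region;
     //   else                            result_val->init_req(_fast_path, hash);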

4780     // this->control() comes from set_results_for_java_call
4781     result_reg->init_req(_slow_path, control());
4782     result_val->init_req(_slow_path, slow_result);
4783     result_io  ->set_req(_slow_path, i_o());
4784     result_mem ->set_req(_slow_path, reset_memory());
4785   }
4786 
4787   // Return the combined state.
4788   set_i_o(        _gvn.transform(result_io)  );
4789   set_all_memory( _gvn.transform(result_mem));
4790 
4791   set_result(result_reg, result_val);
4792   return true;
4793 }
4794 
4795 //---------------------------inline_native_getClass----------------------------
4796 // public final native Class<?> java.lang.Object.getClass();
4797 //
4798 // Build special case code for calls to getClass on an object.
4799 bool LibraryCallKit::inline_native_getClass() {
4800   Node* obj = null_check_receiver();









4801   if (stopped())  return true;
4802   set_result(load_mirror_from_klass(load_object_klass(obj)));
4803   return true;
4804 }
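     // In sketch form, this reduces obj.getClass() to two dependent loads:
     // the klass word of obj, then Klass::_java_mirror (modulo compressed
     // class pointers and the OopHandle indirection).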
4805 
4806 //-----------------inline_native_Reflection_getCallerClass---------------------
4807 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4808 //
4809 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4810 //
4811 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4812 // in that it must skip particular security frames and checks for
4813 // caller sensitive methods.
4814 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4815 #ifndef PRODUCT
4816   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4817     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4818   }
4819 #endif
4820 

5132     dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5133 
5134     flags |= RC_NARROW_MEM; // narrow in memory
5135   }
5136 
5137   // Call it.  Note that the length argument is not scaled.
5138   make_runtime_call(flags,
5139                     OptoRuntime::make_setmemory_Type(),
5140                     StubRoutines::unsafe_setmemory(),
5141                     "unsafe_setmemory",
5142                     dst_type,
5143                     dst_addr, size XTOP, byte);
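     // (XTOP expands, via its #define earlier in this file, to an extra top()
     //  argument on 64-bit builds, standing in for the unused high half of
     //  the long 'size' value in the call signature.)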
5144 
5145   store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, MemNode::unordered);
5146 
5147   return true;
5148 }
5149 
5150 #undef XTOP
5151 














5152 //------------------------copy_to_clone-----------------------------------
5153 // Helper function for inline_native_clone.
5154 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5155   assert(obj_size != nullptr, "");
5156   Node* raw_obj = alloc_obj->in(1);
5157   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5158 
5159   AllocateNode* alloc = nullptr;
5160   if (ReduceBulkZeroing &&
5161       // If we are implementing an array clone without knowing its source type
5162       // (can happen when compiling the array-guarded branch of a reflective
5163       // Object.clone() invocation), initialize the array within the allocation.
5164       // This is needed because some GCs (e.g. ZGC) might fall back in this case
5165       // to a runtime clone call that assumes fully initialized source arrays.
5166       (!is_array || obj->get_ptr_type()->isa_aryptr() != nullptr)) {
5167     // We will be completely responsible for initializing this object -
5168     // mark Initialize node as complete.
5169     alloc = AllocateNode::Ideal_allocation(alloc_obj);
5170     // The object was just allocated - there should not be any stores!
5171     guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");

5202 //  not cloneable or finalizer => slow path to out-of-line Object.clone
5203 //
5204 // The general case has two steps, allocation and copying.
5205 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5206 //
5207 // Copying also has two cases, oop arrays and everything else.
5208 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5209 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5210 //
5211 // These steps fold up nicely if and when the cloned object's klass
5212 // can be sharply typed as an object array, a type array, or an instance.
5213 //
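     // In outline (a sketch matching the path enum below, not literal IR):
     //   oop array                            -> new_array + arrayof_oop_arraycopy
     //   other array                          -> new_array + raw word copy
     //   cloneable, finalizer-free instance   -> new_instance + raw word copy
     //   everything else                      -> out-of-line Object.clone call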
5214 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5215   PhiNode* result_val;
5216 
5217   // Set the reexecute bit for the interpreter to reexecute
5218   // the bytecode that invokes Object.clone if deoptimization happens.
5219   { PreserveReexecuteState preexecs(this);
5220     jvms()->set_should_reexecute(true);
5221 
5222     Node* obj = null_check_receiver();

5223     if (stopped())  return true;
5224 
5225     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();






5226 
5227     // If we are going to clone an instance, we need its exact type to
5228     // know the number and types of fields to convert the clone to
5229     // loads/stores. Maybe a speculative type can help us.
5230     if (!obj_type->klass_is_exact() &&
5231         obj_type->speculative_type() != nullptr &&
5232         obj_type->speculative_type()->is_instance_klass()) {

5233       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5234       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5235           !spec_ik->has_injected_fields()) {
5236         if (!obj_type->isa_instptr() ||
5237             obj_type->is_instptr()->instance_klass()->has_subklass()) {
5238           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5239         }
5240       }
5241     }
5242 
5243     // Conservatively insert a memory barrier on all memory slices.
5244     // Do not let writes into the original float below the clone.
5245     insert_mem_bar(Op_MemBarCPUOrder);
5246 
5247     // paths into result_reg:
5248     enum {
5249       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
5250       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
5251       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
5252       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
5253       PATH_LIMIT
5254     };
5255     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5256     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5257     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
5258     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5259     record_for_igvn(result_reg);
5260 
5261     Node* obj_klass = load_object_klass(obj);





5262     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
5263     if (array_ctl != nullptr) {
5264       // It's an array.
5265       PreserveJVMState pjvms(this);
5266       set_control(array_ctl);
5267       Node* obj_length = load_array_length(obj);
5268       Node* array_size = nullptr; // Size of the array without object alignment padding.
5269       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5270 
5271       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5272       if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5273         // If it is an oop array, it requires very special treatment,
5274         // because gc barriers are required when accessing the array.
5275         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5276         if (is_obja != nullptr) {
5277           PreserveJVMState pjvms2(this);
5278           set_control(is_obja);
5279           // Generate a direct call to the right arraycopy function(s).
5280           // Clones are always tightly coupled.
5281           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5282           ac->set_clone_oop_array();
5283           Node* n = _gvn.transform(ac);
5284           assert(n == ac, "cannot disappear");
5285           ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5286 
5287           result_reg->init_req(_objArray_path, control());
5288           result_val->init_req(_objArray_path, alloc_obj);
5289           result_i_o ->set_req(_objArray_path, i_o());
5290           result_mem ->set_req(_objArray_path, reset_memory());
5291         }
5292       }
5293       // Otherwise, there are no barriers to worry about.
5294       // (We can dispense with card marks if we know the allocation
5295       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
5296       //  causes the non-eden paths to take compensating steps to
5297       //  simulate a fresh allocation, so that no further
5298       //  card marks are required in compiled code to initialize
5299       //  the object.)
5300 
5301       if (!stopped()) {
5302         copy_to_clone(obj, alloc_obj, array_size, true);
5303 
5304         // Present the results of the copy.
5305         result_reg->init_req(_array_path, control());
5306         result_val->init_req(_array_path, alloc_obj);
5307         result_i_o ->set_req(_array_path, i_o());
5308         result_mem ->set_req(_array_path, reset_memory());




































5309       }
5310     }
5311 
5312     // We only go to the instance fast case code if we pass a number of guards.
5313     // The paths which do not pass are accumulated in the slow_region.
5314     RegionNode* slow_region = new RegionNode(1);
5315     record_for_igvn(slow_region);
5316     if (!stopped()) {
5317       // It's an instance (we did array above).  Make the slow-path tests.
5318       // If this is a virtual call, we generate a funny guard.  We grab
5319       // the vtable entry corresponding to clone() from the target object.
5320       // If the target method which we are calling happens to be the
5321       // Object clone() method, we pass the guard.  We do not need this
5322       // guard for non-virtual calls; the caller is known to be the native
5323       // Object clone().
5324       if (is_virtual) {
5325         generate_virtual_guard(obj_klass, slow_region);
5326       }
5327 
5328       // The object must be easily cloneable and must not have a finalizer.
5329       // Both of these conditions may be checked in a single test.
5330       // We could optimize the test further, but we don't care.
5331       generate_misc_flags_guard(obj_klass,
5332                                 // Test both conditions:
5333                                 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
5334                                 // Must be cloneable but not finalizer:
5335                                 KlassFlags::_misc_is_cloneable_fast,

5427         set_jvms(sfpt->jvms());
5428         _reexecute_sp = jvms()->sp();
5429 
5430         return saved_jvms;
5431       }
5432     }
5433   }
5434   return nullptr;
5435 }
5436 
5437 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5438 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5439 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5440   JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5441   uint size = alloc->req();
5442   SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5443   old_jvms->set_map(sfpt);
5444   for (uint i = 0; i < size; i++) {
5445     sfpt->init_req(i, alloc->in(i));
5446   }












5447   // re-push array length for deoptimization
5448   sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
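     // Growing the expression stack by one slot shifts every later JVMState
     // section, so the monitor/scalar-replacement/end offsets move up by one.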
5449   old_jvms->set_sp(old_jvms->sp()+1);
5450   old_jvms->set_monoff(old_jvms->monoff()+1);
5451   old_jvms->set_scloff(old_jvms->scloff()+1);
5452   old_jvms->set_endoff(old_jvms->endoff()+1);
5453   old_jvms->set_should_reexecute(true);
5454 
5455   sfpt->set_i_o(map()->i_o());
5456   sfpt->set_memory(map()->memory());
5457   sfpt->set_control(map()->control());
5458   return sfpt;
5459 }
5460 
5461 // In case of a deoptimization, we restart execution at the
5462 // allocation, allocating a new array. We would leave an uninitialized
5463 // array in the heap that GCs wouldn't expect. Move the allocation
5464 // after the traps so we don't allocate the array if we
5465 // deoptimize. This is possible because tightly_coupled_allocation()
5466 // guarantees there's no observer of the allocated array at this point
5467 // and the control flow is simple enough.
5468 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5469                                                     int saved_reexecute_sp, uint new_idx) {
5470   if (saved_jvms_before_guards != nullptr && !stopped()) {
5471     replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5472 
5473     assert(alloc != nullptr, "only with a tightly coupled allocation");
5474     // restore JVM state to the state at the arraycopy
5475     saved_jvms_before_guards->map()->set_control(map()->control());
5476     assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5477     assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5478     // If we've improved the types of some nodes (null check) while
5479     // emitting the guards, propagate them to the current state
5480     map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5481     set_jvms(saved_jvms_before_guards);
5482     _reexecute_sp = saved_reexecute_sp;
5483 
5484     // Remove the allocation from above the guards
5485     CallProjections callprojs;
5486     alloc->extract_projections(&callprojs, true);
5487     InitializeNode* init = alloc->initialization();
5488     Node* alloc_mem = alloc->in(TypeFunc::Memory);
5489     C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5490     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5491 
5492     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5493     // the allocation (i.e. is only valid if the allocation succeeds):
5494     // 1) replace CastIINode with AllocateArrayNode's length here
5495     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5496     //
5497     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate a
5498     // new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy).
5499     Node* init_control = init->proj_out(TypeFunc::Control);
5500     Node* alloc_length = alloc->Ideal_length();
5501 #ifdef ASSERT
5502     Node* prev_cast = nullptr;
5503 #endif
5504     for (uint i = 0; i < init_control->outcnt(); i++) {
5505       Node* init_out = init_control->raw_out(i);
5506       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5507 #ifdef ASSERT
5508         if (prev_cast == nullptr) {
5509           prev_cast = init_out;

5511           if (prev_cast->cmp(*init_out) == false) {
5512             prev_cast->dump();
5513             init_out->dump();
5514             assert(false, "not equal CastIINode");
5515           }
5516         }
5517 #endif
5518         C->gvn_replace_by(init_out, alloc_length);
5519       }
5520     }
5521     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5522 
5523     // move the allocation here (after the guards)
5524     _gvn.hash_delete(alloc);
5525     alloc->set_req(TypeFunc::Control, control());
5526     alloc->set_req(TypeFunc::I_O, i_o());
5527     Node *mem = reset_memory();
5528     set_all_memory(mem);
5529     alloc->set_req(TypeFunc::Memory, mem);
5530     set_control(init->proj_out_or_null(TypeFunc::Control));
5531     set_i_o(callprojs.fallthrough_ioproj);
5532 
5533     // Update memory as done in GraphKit::set_output_for_allocation()
5534     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5535     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5536     if (ary_type->isa_aryptr() && length_type != nullptr) {
5537       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5538     }
5539     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5540     int            elemidx  = C->get_alias_index(telemref);
5541     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5542     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5543 
5544     Node* allocx = _gvn.transform(alloc);
5545     assert(allocx == alloc, "where has the allocation gone?");
5546     assert(dest->is_CheckCastPP(), "not an allocation result?");
5547 
5548     _gvn.hash_delete(dest);
5549     dest->set_req(0, control());
5550     Node* destx = _gvn.transform(dest);
5551     assert(destx == dest, "where has the allocation result gone?");

5849         top_src  = src_type->isa_aryptr();
5850         has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5851         src_spec = true;
5852       }
5853       if (!has_dest) {
5854         dest = maybe_cast_profiled_obj(dest, dest_k, true);
5855         dest_type  = _gvn.type(dest);
5856         top_dest  = dest_type->isa_aryptr();
5857         has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5858         dest_spec = true;
5859       }
5860     }
5861   }
5862 
5863   if (has_src && has_dest && can_emit_guards) {
5864     BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5865     BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5866     if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5867     if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5868 
5869     if (src_elem == dest_elem && src_elem == T_OBJECT) {
5870       // If both arrays are object arrays then having the exact types
5871       // for both will remove the need for a subtype check at runtime
5872       // before the call and may make it possible to pick a faster copy
5873       // routine (without a subtype check on every element)
5874       // Do we have the exact type of src?
5875       bool could_have_src = src_spec;
5876       // Do we have the exact type of dest?
5877       bool could_have_dest = dest_spec;
5878       ciKlass* src_k = nullptr;
5879       ciKlass* dest_k = nullptr;
5880       if (!src_spec) {
5881         src_k = src_type->speculative_type_not_null();
5882         if (src_k != nullptr && src_k->is_array_klass()) {
5883           could_have_src = true;
5884         }
5885       }
5886       if (!dest_spec) {
5887         dest_k = dest_type->speculative_type_not_null();
5888         if (dest_k != nullptr && dest_k->is_array_klass()) {
5889           could_have_dest = true;
5890         }
5891       }
5892       if (could_have_src && could_have_dest) {
5893         // If we can have both exact types, emit the missing guards
5894         if (could_have_src && !src_spec) {
5895           src = maybe_cast_profiled_obj(src, src_k, true);


5896         }
5897         if (could_have_dest && !dest_spec) {
5898           dest = maybe_cast_profiled_obj(dest, dest_k, true);


5899         }
5900       }
5901     }
5902   }
5903 
5904   ciMethod* trap_method = method();
5905   int trap_bci = bci();
5906   if (saved_jvms_before_guards != nullptr) {
5907     trap_method = alloc->jvms()->method();
5908     trap_bci = alloc->jvms()->bci();
5909   }
5910 
5911   bool negative_length_guard_generated = false;
5912 
5913   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5914       can_emit_guards &&
5915       !src->is_top() && !dest->is_top()) {
5916     // validate arguments: enables transformation of the ArrayCopyNode
5917     validated = true;
5918 
5919     RegionNode* slow_region = new RegionNode(1);
5920     record_for_igvn(slow_region);
5921 
5922     // (1) src and dest are arrays.
5923     generate_non_array_guard(load_object_klass(src), slow_region);
5924     generate_non_array_guard(load_object_klass(dest), slow_region);
5925 
5926     // (2) src and dest arrays must have elements of the same BasicType
5927     // done at macro expansion or at Ideal transformation time
5928 
5929     // (4) src_offset must not be negative.
5930     generate_negative_guard(src_offset, slow_region);
5931 
5932     // (5) dest_offset must not be negative.
5933     generate_negative_guard(dest_offset, slow_region);
5934 
5935     // (7) src_offset + length must not exceed length of src.

5938                          slow_region);
5939 
5940     // (8) dest_offset + length must not exceed length of dest.
5941     generate_limit_guard(dest_offset, length,
5942                          load_array_length(dest),
5943                          slow_region);
5944 
5945     // (6) length must not be negative.
5946     // This is also checked in generate_arraycopy() during macro expansion, but
5947     // we also have to check it here for the case where the ArrayCopyNode will
5948     // be eliminated by Escape Analysis.
5949     if (EliminateAllocations) {
5950       generate_negative_guard(length, slow_region);
5951       negative_length_guard_generated = true;
5952     }
5953 
5954     // (9) each element of an oop array must be assignable
5955     Node* dest_klass = load_object_klass(dest);
5956     if (src != dest) {
5957       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);


5958 
5959       if (not_subtype_ctrl != top()) {
5960         PreserveJVMState pjvms(this);
5961         set_control(not_subtype_ctrl);
5962         uncommon_trap(Deoptimization::Reason_intrinsic,
5963                       Deoptimization::Action_make_not_entrant);
5964         assert(stopped(), "Should be stopped");






















5965       }
5966     }

5967     {
5968       PreserveJVMState pjvms(this);
5969       set_control(_gvn.transform(slow_region));
5970       uncommon_trap(Deoptimization::Reason_intrinsic,
5971                     Deoptimization::Action_make_not_entrant);
5972       assert(stopped(), "Should be stopped");
5973     }
5974 
5975     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5976     const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
5977     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5978     arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
5979   }
5980 
5981   if (stopped()) {
5982     return true;
5983   }
5984 
5985   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
5986                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
5987                                           // so the compiler has a chance to eliminate them: during macro expansion,
5988                                           // we have to set their control (CastPP nodes are eliminated).
5989                                           load_object_klass(src), load_object_klass(dest),
5990                                           load_array_length(src), load_array_length(dest));
5991 
5992   ac->set_arraycopy(validated);
5993 
5994   Node* n = _gvn.transform(ac);
5995   if (n == ac) {
5996     ac->connect_outputs(this);
5997   } else {

  27 #include "ci/ciFlatArrayKlass.hpp"

 329   case vmIntrinsics::_makePrivateBuffer:        return inline_unsafe_make_private_buffer();
 330   case vmIntrinsics::_finishPrivateBuffer:      return inline_unsafe_finish_private_buffer();
 331   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 332   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 333   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 334   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 335   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 336   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 337   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 338   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 339   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);
 340   case vmIntrinsics::_getValue:                 return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false, true);
 341 
 342   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 343   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 344   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 345   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 346   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 347   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 348   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 349   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 350   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);
 351   case vmIntrinsics::_putValue:                 return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false, true);
 352 
 353   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 354   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 355   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 356   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 357   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 358   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 359   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 360   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 361   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 362 
 363   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 364   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 365   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 366   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 367   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 368   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 369   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 370   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 371   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 498                                                                                          "notifyJvmtiEnd", false, true);
 499   case vmIntrinsics::_notifyJvmtiVThreadMount:   return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
 500                                                                                          "notifyJvmtiMount", false, false);
 501   case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
 502                                                                                          "notifyJvmtiUnmount", false, false);
 503   case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
 504 #endif
 505 
 506 #ifdef JFR_HAVE_INTRINSICS
 507   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
 508   case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
 509   case vmIntrinsics::_jvm_commit:               return inline_native_jvm_commit();
 510 #endif
 511   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 512   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 513   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 514   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 515   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 516   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 517   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 518   case vmIntrinsics::_isFlatArray:              return inline_unsafe_isFlatArray();
 519   case vmIntrinsics::_setMemory:                return inline_unsafe_setMemory();
 520   case vmIntrinsics::_getLength:                return inline_native_getLength();
 521   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 522   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 523   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 524   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 525   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 526   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 527   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 528 
 529   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 530   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 531   case vmIntrinsics::_newNullRestrictedArray:   return inline_newNullRestrictedArray();
 532 
 533   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 534 
 535   case vmIntrinsics::_isInstance:
 536   case vmIntrinsics::_getModifiers:
 537   case vmIntrinsics::_isInterface:
 538   case vmIntrinsics::_isArray:
 539   case vmIntrinsics::_isPrimitive:
 540   case vmIntrinsics::_isHidden:
 541   case vmIntrinsics::_getSuperclass:
 542   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 543 
 544   case vmIntrinsics::_floatToRawIntBits:
 545   case vmIntrinsics::_floatToIntBits:
 546   case vmIntrinsics::_intBitsToFloat:
 547   case vmIntrinsics::_doubleToRawLongBits:
 548   case vmIntrinsics::_doubleToLongBits:
 549   case vmIntrinsics::_longBitsToDouble:
 550   case vmIntrinsics::_floatToFloat16:
 551   case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());

2242     case vmIntrinsics::_remainderUnsigned_l: {
2243       zero_check_long(argument(2));
2244       // Compile-time detection of a zero divisor (ArithmeticException)
2245       if (stopped()) {
2246         return true; // keep the graph constructed so far
2247       }
2248       n = new UModLNode(control(), argument(0), argument(2));
2249       break;
2250     }
2251     default:  fatal_unexpected_iid(id);  break;
2252   }
2253   set_result(_gvn.transform(n));
2254   return true;
2255 }
2256 
2257 //----------------------------inline_unsafe_access----------------------------
2258 
2259 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2260   // Attempt to infer a sharper value type from the offset and base type.
2261   ciKlass* sharpened_klass = nullptr;
2262   bool null_free = false;
2263 
2264   // See if it is an instance field, with an object type.
2265   if (alias_type->field() != nullptr) {
2266     if (alias_type->field()->type()->is_klass()) {
2267       sharpened_klass = alias_type->field()->type()->as_klass();
2268       null_free = alias_type->field()->is_null_free();
2269     }
2270   }
2271 
2272   const TypeOopPtr* result = nullptr;
2273   // See if it is a narrow oop array.
2274   if (adr_type->isa_aryptr()) {
2275     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2276       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2277       null_free = adr_type->is_aryptr()->is_null_free();
2278       if (elem_type != nullptr && elem_type->is_loaded()) {
2279         // Sharpen the value type.
2280         result = elem_type;
2281       }
2282     }
2283   }
2284 
2285   // The sharpened class might be unloaded if there is no class loader
2286   // constraint in place.
2287   if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2288     // Sharpen the value type.
2289     result = TypeOopPtr::make_from_klass(sharpened_klass);
2290     if (null_free) {
2291       result = result->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2292     }
2293   }
2294   if (result != nullptr) {
2295 #ifndef PRODUCT
2296     if (C->print_intrinsics() || C->print_inlining()) {
2297       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2298       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2299     }
2300 #endif
2301   }
2302   return result;
2303 }
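     // For example, a getReference from a String[] element can be sharpened
     // here from plain Object to java.lang.String (a sketch; the exact type
     // depends on the array's element type).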
2304 
2305 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2306   switch (kind) {
2307       case Relaxed:
2308         return MO_UNORDERED;
2309       case Opaque:
2310         return MO_RELAXED;
2311       case Acquire:
2312         return MO_ACQUIRE;
2313       case Release:
2314         return MO_RELEASE;
2315       case Volatile:
2316         return MO_SEQ_CST;
2317       default:
2318         ShouldNotReachHere();
2319         return 0;
2320   }
2321 }
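     // For instance, the _getIntVolatile intrinsic reaches this mapping with
     // kind == Volatile, so the access is decorated with MO_SEQ_CST.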
2322 
2323 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned, const bool is_flat) {
2324   if (callee()->is_static())  return false;  // caller must have the capability!
2325   DecoratorSet decorators = C2_UNSAFE_ACCESS;
2326   guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2327   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2328   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2329 
2330   if (is_reference_type(type)) {
2331     decorators |= ON_UNKNOWN_OOP_REF;
2332   }
2333 
2334   if (unaligned) {
2335     decorators |= C2_UNALIGNED;
2336   }
2337 
2338 #ifndef PRODUCT
2339   {
2340     ResourceMark rm;
2341     // Check the signatures.
2342     ciSignature* sig = callee()->signature();
2343 #ifdef ASSERT
2344     if (!is_store) {
2345       // Object getReference(Object base, int/long offset), etc.
2346       BasicType rtype = sig->return_type()->basic_type();
2347       assert(rtype == type, "getter must return the expected value");
2348       assert(sig->count() == 2 || (is_flat && sig->count() == 3), "oop getter has 2 or 3 arguments");
2349       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2350       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2351     } else {
2352       // void putReference(Object base, int/long offset, Object x), etc.
2353       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2354       assert(sig->count() == 3 || (is_flat && sig->count() == 4), "oop putter has 3 arguments");
2355       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2356       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2357       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2358       assert(vtype == type, "putter must accept the expected value");
2359     }
2360 #endif // ASSERT
2361  }
2362 #endif //PRODUCT
2363 
2364   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2365 
2366   Node* receiver = argument(0);  // type: oop
2367 
2368   // Build address expression.
2369   Node* heap_base_oop = top();
2370 
2371   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2372   Node* base = argument(1);  // type: oop
2373   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2374   Node* offset = argument(2);  // type: long
2375   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2376   // to be plain byte offsets, which are also the same as those accepted
2377   // by oopDesc::field_addr.
2378   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2379          "fieldOffset must be byte-scaled");
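     // A sketch of the Java-level usage this intrinsic expects ('f' is a
     // hypothetical java.lang.reflect.Field):
     //   long off = U.objectFieldOffset(f);
     //   int  v   = U.getInt(obj, off);   // arrives here with a byte offset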
2380 
2381   ciInlineKlass* inline_klass = nullptr;
2382   if (is_flat) {
2383     const TypeInstPtr* cls = _gvn.type(argument(4))->isa_instptr();
2384     if (cls == nullptr || cls->const_oop() == nullptr) {
2385       return false;
2386     }
2387     ciType* mirror_type = cls->const_oop()->as_instance()->java_mirror_type();
2388     if (!mirror_type->is_inlinetype()) {
2389       return false;
2390     }
2391     inline_klass = mirror_type->as_inline_klass();
2392   }
2393 
2394   if (base->is_InlineType()) {
2395     InlineTypeNode* vt = base->as_InlineType();
2396     if (is_store) {
2397       if (!vt->is_allocated(&_gvn)) {
2398         return false;
2399       }
2400       base = vt->get_oop();
2401     } else {
2402       if (offset->is_Con()) {
2403         long off = find_long_con(offset, 0);
2404         ciInlineKlass* vk = vt->type()->inline_klass();
2405         if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2406           return false;
2407         }
2408 
2409         ciField* field = vk->get_non_flat_field_by_offset(off);
2410         if (field != nullptr) {
2411           BasicType bt = type2field[field->type()->basic_type()];
2412           if (bt == T_ARRAY || bt == T_NARROWOOP) {
2413             bt = T_OBJECT;
2414           }
2415           if (bt == type && (!field->is_flat() || field->type() == inline_klass)) {
2416             Node* value = vt->field_value_by_offset(off, false);
2417             if (value->is_InlineType()) {
2418               value = value->as_InlineType()->adjust_scalarization_depth(this);
2419             }
2420             set_result(value);
2421             return true;
2422           }
2423         }
2424       }
2425       {
2426         // Re-execute the unsafe access if allocation triggers deoptimization.
2427         PreserveReexecuteState preexecs(this);
2428         jvms()->set_should_reexecute(true);
2429         vt = vt->buffer(this);
2430       }
2431       base = vt->get_oop();
2432     }
2433   }
2434 
2435   // 32-bit machines ignore the high half!
2436   offset = ConvL2X(offset);
2437 
2438   // Save state and restore on bailout
2439   uint old_sp = sp();
2440   SafePointNode* old_map = clone_map();
2441 
2442   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2443   assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2444 
2445   if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2446     if (type != T_OBJECT && (inline_klass == nullptr || !inline_klass->has_object_fields())) {
2447       decorators |= IN_NATIVE; // off-heap primitive access
2448     } else {
2449       set_map(old_map);
2450       set_sp(old_sp);
2451       return false; // off-heap oop accesses are not supported
2452     }
2453   } else {
2454     heap_base_oop = base; // on-heap or mixed access
2455   }
2456 
2457   // Can base be null? Otherwise, always on-heap access.
2458   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2459 
2460   if (!can_access_non_heap) {
2461     decorators |= IN_HEAP;
2462   }
2463 
2464   Node* val = is_store ? argument(4 + (is_flat ? 1 : 0)) : nullptr;
2465 
2466   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2467   if (adr_type == TypePtr::NULL_PTR) {
2468     set_map(old_map);
2469     set_sp(old_sp);
2470     return false; // off-heap access with zero address
2471   }
2472 
2473   // Try to categorize the address.
2474   Compile::AliasType* alias_type = C->alias_type(adr_type);
2475   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2476 
2477   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2478       alias_type->adr_type() == TypeAryPtr::RANGE) {
2479     set_map(old_map);
2480     set_sp(old_sp);
2481     return false; // not supported
2482   }
2483 
2484   bool mismatched = false;
2485   BasicType bt = T_ILLEGAL;
2486   ciField* field = nullptr;
2487   if (adr_type->isa_instptr()) {
2488     const TypeInstPtr* instptr = adr_type->is_instptr();
2489     ciInstanceKlass* k = instptr->instance_klass();
2490     int off = instptr->offset();
2491     if (instptr->const_oop() != nullptr &&
2492         k == ciEnv::current()->Class_klass() &&
2493         instptr->offset() >= (k->size_helper() * wordSize)) {
2494       k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2495       field = k->get_field_by_offset(off, true);
2496     } else {
2497       field = k->get_non_flat_field_by_offset(off);
2498     }
2499     if (field != nullptr) {
2500       bt = type2field[field->type()->basic_type()];
2501     }
2502     assert(bt == alias_type->basic_type() || is_flat, "should match");
2503   } else {
2504     bt = alias_type->basic_type();
2505   }
2506 
2507   if (bt != T_ILLEGAL) {
2508     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2509     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2510       // Alias type doesn't differentiate between byte[] and boolean[].
2511       // Use address type to get the element type.
2512       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2513     }
2514     if (is_reference_type(bt, true)) {
2515       // accessing an array field with getReference is not a mismatch
2516       bt = T_OBJECT;
2517     }
2518     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2519       // Don't intrinsify mismatched object accesses
2520       set_map(old_map);
2521       set_sp(old_sp);
2522       return false;
2523     }
2524     mismatched = (bt != type);
2525   } else if (alias_type->adr_type()->isa_oopptr()) {
2526     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2527   }
2528 
2529   if (is_flat) {
2530     if (adr_type->isa_instptr()) {
2531       if (field == nullptr || field->type() != inline_klass) {
2532         mismatched = true;
2533       }
2534     } else if (adr_type->isa_aryptr()) {
2535       const Type* elem = adr_type->is_aryptr()->elem();
2536       if (!adr_type->is_flat() || elem->inline_klass() != inline_klass) {
2537         mismatched = true;
2538       }
2539     } else {
2540       mismatched = true;
2541     }
2542     if (is_store) {
2543       const Type* val_t = _gvn.type(val);
2544       if (!val_t->is_inlinetypeptr() || val_t->inline_klass() != inline_klass) {
2545         set_map(old_map);
2546         set_sp(old_sp);
2547         return false;
2548       }
2549     }
2550   }
2551 
2552   destruct_map_clone(old_map);
2553   assert(!mismatched || is_flat || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2554 
2555   if (mismatched) {
2556     decorators |= C2_MISMATCHED;
2557   }
2558 
2559   // First guess at the value type.
2560   const Type *value_type = Type::get_const_basic_type(type);
2561 
2562   // Figure out the memory ordering.
2563   decorators |= mo_decorator_for_access_kind(kind);
2564 
2565   if (!is_store) {
2566     if (type == T_OBJECT && !is_flat) {
2567       const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2568       if (tjp != nullptr) {
2569         value_type = tjp;
2570       }
2571     }
2572   }
2573 
2574   receiver = null_check(receiver);
2575   if (stopped()) {
2576     return true;
2577   }
2578   // Heap pointers get a null-check from the interpreter,
2579   // as a courtesy.  However, this is not guaranteed by Unsafe,
2580   // and it is not possible to fully distinguish unintended nulls
2581   // from intended ones in this API.
2582 
2583   if (!is_store) {
2584     Node* p = nullptr;
2585     // Try to constant fold a load from a constant field
2586 
2587     if (heap_base_oop != top() && field != nullptr && field->is_constant() && !field->is_flat() && !mismatched) {
2588       // final or stable field
2589       p = make_constant_from_field(field, heap_base_oop);
2590     }
2591 
2592     if (p == nullptr) { // Could not constant fold the load
2593       if (is_flat) {
2594         if (adr_type->isa_instptr() && !mismatched) {
2595           ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2596           int offset = adr_type->is_instptr()->offset();
2597           p = InlineTypeNode::make_from_flat(this, inline_klass, base, base, holder, offset, decorators);
2598         } else {
2599           p = InlineTypeNode::make_from_flat(this, inline_klass, base, adr, nullptr, 0, decorators);
2600         }
2601       } else {
2602         p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2603         const TypeOopPtr* ptr = value_type->make_oopptr();
2604         if (ptr != nullptr && ptr->is_inlinetypeptr()) {
2605           // Load a non-flattened inline type from memory
2606           p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass(), !ptr->maybe_null());
2607         }
2608       }
2609       // Normalize the value returned by getBoolean in the following cases
2610       if (type == T_BOOLEAN &&
2611           (mismatched ||
2612            heap_base_oop == top() ||                  // - heap_base_oop is null or
2613            (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2614                                                       //   and the unsafe access is made to large offset
2615                                                       //   (i.e., larger than the maximum offset necessary for any
2616                                                       //   field access)
2617             ) {
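               // Any nonzero byte read must collapse to exactly 1 so Java
               // code only ever observes a canonical boolean value.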
2618           IdealKit ideal = IdealKit(this);
2619 #define __ ideal.
2620           IdealVariable normalized_result(ideal);
2621           __ declarations_done();
2622           __ set(normalized_result, p);
2623           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2624           __ set(normalized_result, ideal.ConI(1));
2625           ideal.end_if();
2626           final_sync(ideal);
2627           p = __ value(normalized_result);
2628 #undef __
2629       }
2630     }
2631     if (type == T_ADDRESS) {
2632       p = gvn().transform(new CastP2XNode(nullptr, p));
2633       p = ConvX2UL(p);
2634     }
2635     // The load node has the control of the preceding MemBarCPUOrder.  All
2636     // following nodes will have the control of the MemBarCPUOrder inserted at
2637     // the end of this method.  So, pushing the load onto the stack at a later
2638     // point is fine.
2639     set_result(p);
2640   } else {
2641     if (bt == T_ADDRESS) {
2642       // Repackage the long as a pointer.
2643       val = ConvL2X(val);
2644       val = gvn().transform(new CastX2PNode(val));
2645     }
2646     if (is_flat) {
2647       if (adr_type->isa_instptr() && !mismatched) {
2648         ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2649         int offset = adr_type->is_instptr()->offset();
2650         val->as_InlineType()->store_flat(this, base, base, holder, offset, decorators);
2651       } else {
2652         val->as_InlineType()->store_flat(this, base, adr, nullptr, 0, decorators);
2653       }
2654     } else {
2655       access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2656     }
2657   }
2658 
2659   if (argument(1)->is_InlineType() && is_store) {
2660     InlineTypeNode* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(argument(1))->inline_klass());
2661     value = value->make_larval(this, false);
2662     replace_in_map(argument(1), value);
2663   }
2664 
2665   return true;
2666 }
2667 
2668 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2669   Node* receiver = argument(0);
2670   Node* value = argument(1);
2671   if (!value->is_InlineType()) {
2672     return false;
2673   }
2674 
2675   receiver = null_check(receiver);
2676   if (stopped()) {
2677     return true;
2678   }
2679 
2680   set_result(value->as_InlineType()->make_larval(this, true));
2681   return true;
2682 }
2683 
2684 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2685   Node* receiver = argument(0);
2686   Node* buffer = argument(1);
2687   if (!buffer->is_InlineType()) {
2688     return false;
2689   }
2690   InlineTypeNode* vt = buffer->as_InlineType();
2691   if (!vt->is_allocated(&_gvn)) {
2692     return false;
2693   }
2694   // TODO 8239003 Why is this needed?
2695   if (AllocateNode::Ideal_allocation(vt->get_oop()) == nullptr) {
2696     return false;
2697   }
2698 
2699   receiver = null_check(receiver);
2700   if (stopped()) {
2701     return true;
2702   }
2703 
2704   set_result(vt->finish_larval(this));
2705   return true;
2706 }
2707 
2708 //----------------------------inline_unsafe_load_store----------------------------
2709 // This method serves a couple of different customers (depending on LoadStoreKind):
2710 //
2711 // LS_cmp_swap:
2712 //
2713 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2714 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2715 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2716 //
2717 // LS_cmp_swap_weak:
2718 //
2719 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2720 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2721 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2722 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2723 //
2724 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);
2725 //   boolean weakCompareAndSetIntPlain(     Object o, long offset, int    expected, int    x);
2726 //   boolean weakCompareAndSetIntAcquire(   Object o, long offset, int    expected, int    x);
2727 //   boolean weakCompareAndSetIntRelease(   Object o, long offset, int    expected, int    x);

2893     }
2894     case LS_cmp_swap:
2895     case LS_cmp_swap_weak:
2896     case LS_get_add:
2897       break;
2898     default:
2899       ShouldNotReachHere();
2900   }
2901 
2902   // Null check receiver.
2903   receiver = null_check(receiver);
2904   if (stopped()) {
2905     return true;
2906   }
2907 
2908   int alias_idx = C->get_alias_index(adr_type);
2909 
2910   if (is_reference_type(type)) {
2911     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2912 
2913     if (oldval != nullptr && oldval->is_InlineType()) {
2914       // Re-execute the unsafe access if allocation triggers deoptimization.
2915       PreserveReexecuteState preexecs(this);
2916       jvms()->set_should_reexecute(true);
2917       oldval = oldval->as_InlineType()->buffer(this)->get_oop();
2918     }
2919     if (newval != nullptr && newval->is_InlineType()) {
2920       // Re-execute the unsafe access if allocation triggers deoptimization.
2921       PreserveReexecuteState preexecs(this);
2922       jvms()->set_should_reexecute(true);
2923       newval = newval->as_InlineType()->buffer(this)->get_oop();
2924     }
2925 
2926     // Transformation of a value which could be a null pointer (CastPP #null)
2927     // could be delayed during Parse (for example, in adjust_map_after_if()).
2928     // Execute the transformation here to avoid barrier generation in such a case.
2929     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2930       newval = _gvn.makecon(TypePtr::NULL_PTR);
2931 
2932     if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2933       // Refine the value to a null constant, when it is known to be null
2934       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2935     }
2936   }
2937 
2938   Node* result = nullptr;
2939   switch (kind) {
2940     case LS_cmp_exchange: {
2941       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2942                                             oldval, newval, value_type, type, decorators);
2943       break;
2944     }
2945     case LS_cmp_swap_weak:

3092                     Deoptimization::Action_make_not_entrant);
3093     }
3094     if (stopped()) {
3095       return true;
3096     }
3097 #endif //INCLUDE_JVMTI
3098 
3099   Node* test = nullptr;
3100   if (LibraryCallKit::klass_needs_init_guard(kls)) {
3101     // Note:  The argument might still be an illegal value like
3102     // Serializable.class or Object[].class.   The runtime will handle it.
3103     // But we must make an explicit check for initialization.
3104     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3105     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3106     // can generate code to load it as unsigned byte.
3107     Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
3108     Node* bits = intcon(InstanceKlass::fully_initialized);
3109     test = _gvn.transform(new SubINode(inst, bits));
3110     // The 'test' is non-zero if we need to take a slow path.
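    // (test = _init_state - fully_initialized, so test == 0 exactly when the
    // klass is fully initialized and the fast path can proceed.)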
3111   }
3112   Node* obj = nullptr;
3113   const TypeInstKlassPtr* tkls = _gvn.type(kls)->isa_instklassptr();
3114   if (tkls != nullptr && tkls->instance_klass()->is_inlinetype()) {
3115     obj = InlineTypeNode::make_default(_gvn, tkls->instance_klass()->as_inline_klass())->buffer(this);
3116   } else {
3117     obj = new_instance(kls, test);
3118   }
3119   set_result(obj);
3120   return true;
3121 }
3122 
3123 //------------------------inline_native_time_funcs--------------
3124 // Inline code for System.currentTimeMillis() and System.nanoTime().
3125 // These have the same type and signature.
3126 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3127   const TypeFunc* tf = OptoRuntime::void_long_Type();
3128   const TypePtr* no_memory_effects = nullptr;
3129   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3130   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3131 #ifdef ASSERT
3132   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3133   assert(value_top == top(), "second value must be top");
3134 #endif
3135   set_result(value);
3136   return true;
3137 }
3138 

3880   Node* thread = _gvn.transform(new ThreadLocalNode());
3881   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3882   Node* thread_obj_handle
3883     = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3884   thread_obj_handle = _gvn.transform(thread_obj_handle);
3885   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3886   access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3887 
3888   // Change the lock_id of the JavaThread
3889   Node* tid = load_field_from_object(arr, "tid", "J");
3890   Node* thread_id_offset = basic_plus_adr(thread, in_bytes(JavaThread::lock_id_offset()));
3891   Node* tid_memory = store_to_memory(control(), thread_id_offset, tid, T_LONG, MemNode::unordered, true);
3892 
3893   JFR_ONLY(extend_setCurrentThread(thread, arr);)
3894   return true;
3895 }
3896 
3897 const Type* LibraryCallKit::scopedValueCache_type() {
3898   ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3899   const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3900   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS, /* stable= */ false, /* flat= */ false, /* not_flat= */ true, /* not_null_free= */ true);
3901 
3902   // Because we create the scopedValue cache lazily we have to make the
3903   // type of the result BotPTR.
3904   bool xk = etype->klass_is_exact();
3905   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, TypeAryPtr::Offset(0));
3906   return objects_type;
3907 }
3908 
3909 Node* LibraryCallKit::scopedValueCache_helper() {
3910   Node* thread = _gvn.transform(new ThreadLocalNode());
3911   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3912   // We cannot use immutable_memory() because we might flip onto a
3913   // different carrier thread, at which point we'll need to use that
3914   // carrier thread's cache.
3915   // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3916   //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3917   return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3918 }
3919 
3920 //------------------------inline_native_scopedValueCache------------------
3921 bool LibraryCallKit::inline_native_scopedValueCache() {
3922   Node* cache_obj_handle = scopedValueCache_helper();
3923   const Type* objects_type = scopedValueCache_type();
3924   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3925 

4008   }
4009 
4010   // Result of top level CFG and Memory.
4011   RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
4012   record_for_igvn(result_rgn);
4013   PhiNode* result_mem = new PhiNode(result_rgn, Type::MEMORY, TypePtr::BOTTOM);
4014   record_for_igvn(result_mem);
4015 
4016   result_rgn->init_req(_true_path, _gvn.transform(valid_pin_count));
4017   result_rgn->init_req(_false_path, _gvn.transform(continuation_is_null));
4018   result_mem->init_req(_true_path, _gvn.transform(updated_pin_count_memory));
4019   result_mem->init_req(_false_path, _gvn.transform(input_memory_state));
4020 
4021   // Set output state.
4022   set_control(_gvn.transform(result_rgn));
4023   set_all_memory(_gvn.transform(result_mem));
4024 
4025   return true;
4026 }
4027 









4028 //-----------------------load_klass_from_mirror_common-------------------------
4029 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
4030 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
4031 // and branch to the given path on the region.
4032 // If never_see_null, take an uncommon trap on null, so we can optimistically
4033 // compile for the non-null case.
4034 // If the region is null, force never_see_null = true.
4035 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
4036                                                     bool never_see_null,
4037                                                     RegionNode* region,
4038                                                     int null_path,
4039                                                     int offset) {
4040   if (region == nullptr)  never_see_null = true;
4041   Node* p = basic_plus_adr(mirror, offset);
4042   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4043   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
4044   Node* null_ctl = top();
4045   kls = null_check_oop(kls, &null_ctl, never_see_null);
4046   if (region != nullptr) {
4047     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

4051   }
4052   return kls;
4053 }
4054 
4055 //--------------------(inline_native_Class_query helpers)---------------------
4056 // Use this for JVM_ACC_INTERFACE.
4057 // Fall through if (mods & mask) == bits, take the guard otherwise.
4058 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
4059                                                  ByteSize offset, const Type* type, BasicType bt) {
4060   // Branch around if the given klass has the given modifier bit set.
4061   // Like generate_guard, adds a new path onto the region.
4062   Node* modp = basic_plus_adr(kls, in_bytes(offset));
4063   Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
4064   Node* mask = intcon(modifier_mask);
4065   Node* bits = intcon(modifier_bits);
4066   Node* mbit = _gvn.transform(new AndINode(mods, mask));
4067   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
4068   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
4069   return generate_fair_guard(bol, region);
4070 }
4071 
4072 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
4073   return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
4074                                     Klass::access_flags_offset(), TypeInt::INT, T_INT);
4075 }
4076 
4077 // Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
4078 Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
4079   return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
4080                                     Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
4081 }
4082 
4083 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
4084   return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
4085 }
4086 
4087 //-------------------------inline_native_Class_query-------------------
4088 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
4089   const Type* return_type = TypeInt::BOOL;
4090   Node* prim_return_value = top();  // what happens if it's a primitive class?
4091   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);

4253 
4254   case vmIntrinsics::_getClassAccessFlags:
4255     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
4256     query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
4257     break;
4258 
4259   default:
4260     fatal_unexpected_iid(id);
4261     break;
4262   }
4263 
4264   // Fall-through is the normal case of a query to a real class.
4265   phi->init_req(1, query_value);
4266   region->init_req(1, control());
4267 
4268   C->set_has_split_ifs(true); // Has chance for split-if optimization
4269   set_result(region, phi);
4270   return true;
4271 }
4272 
4273 
4274 //-------------------------inline_Class_cast-------------------
4275 bool LibraryCallKit::inline_Class_cast() {
4276   Node* mirror = argument(0); // Class
4277   Node* obj    = argument(1);
4278   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4279   if (mirror_con == nullptr) {
4280     return false;  // dead path (mirror->is_top()).
4281   }
4282   if (obj == nullptr || obj->is_top()) {
4283     return false;  // dead path
4284   }
4285   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4286 
4287   // First, see if Class.cast() can be folded statically.
4288   // java_mirror_type() returns non-null for compile-time Class constants.
4289   bool is_null_free_array = false;
4290   ciType* tm = mirror_con->java_mirror_type(&is_null_free_array);
4291   if (tm != nullptr && tm->is_klass() &&
4292       tp != nullptr) {
4293     if (!tp->is_loaded()) {
4294       // Don't use intrinsic when class is not loaded.
4295       return false;
4296     } else {
4297       const TypeKlassPtr* tklass = TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces);
4298       if (is_null_free_array) {
4299         tklass = tklass->is_aryklassptr()->cast_to_null_free();
4300       }
4301       int static_res = C->static_subtype_check(tklass, tp->as_klass_type());
4302       if (static_res == Compile::SSC_always_true) {
4303         // isInstance() is true - fold the code.
4304         set_result(obj);
4305         return true;
4306       } else if (static_res == Compile::SSC_always_false) {
4307         // Don't use intrinsic, have to throw ClassCastException.
4308         // If the reference is null, the non-intrinsic bytecode will
4309         // be optimized appropriately.
4310         return false;
4311       }
4312     }
4313   }
4314 
4315   // Bailout intrinsic and do normal inlining if exception path is frequent.
4316   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4317     return false;
4318   }
4319 
4320   // Generate dynamic checks.
4321   // Class.cast() is java implementation of _checkcast bytecode.
4322   // Do checkcast (Parse::do_checkcast()) optimizations here.
4323 
4324   mirror = null_check(mirror);
4325   // If mirror is dead, only null-path is taken.
4326   if (stopped()) {
4327     return true;
4328   }
4329 
4330   // Not a subtype, or the mirror's klass ptr is nullptr (in case it is a primitive).
4331   enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
4332   RegionNode* region = new RegionNode(PATH_LIMIT);
4333   record_for_igvn(region);
4334 
4335   // Now load the mirror's klass metaobject, and null-check it.
4336   // If kls is null, we have a primitive mirror and
4337   // nothing is an instance of a primitive type.
4338   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4339 
4340   Node* res = top();
4341   Node* io = i_o();
4342   Node* mem = merged_memory();
4343   if (!stopped()) {
4344 
4345     Node* bad_type_ctrl = top();
4346     // Do checkcast optimizations.
4347     res = gen_checkcast(obj, kls, &bad_type_ctrl);
4348     region->init_req(_bad_type_path, bad_type_ctrl);
4349   }
4350   if (region->in(_prim_path) != top() ||
4351       region->in(_bad_type_path) != top() ||
4352       region->in(_npe_path) != top()) {
4353     // Let the interpreter throw ClassCastException.
4354     PreserveJVMState pjvms(this);
4355     set_control(_gvn.transform(region));
4356     // Set IO and memory because gen_checkcast may override them when buffering inline types
4357     set_i_o(io);
4358     set_all_memory(mem);
4359     uncommon_trap(Deoptimization::Reason_intrinsic,
4360                   Deoptimization::Action_maybe_recompile);
4361   }
4362   if (!stopped()) {
4363     set_result(res);
4364   }
4365   return true;
4366 }
4367 
4368 
4369 //--------------------------inline_native_subtype_check------------------------
4370 // This intrinsic takes the JNI calls out of the heart of
4371 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4372 bool LibraryCallKit::inline_native_subtype_check() {
4373   // Pull both arguments off the stack.
4374   Node* args[2];                // two java.lang.Class mirrors: superc, subc
4375   args[0] = argument(0);
4376   args[1] = argument(1);
4377   Node* klasses[2];             // corresponding Klasses: superk, subk
4378   klasses[0] = klasses[1] = top();
4379 
4380   enum {
4381     // A full decision tree on {superc is prim, subc is prim}:
4382     _prim_0_path = 1,           // {P,N} => false
4383                                 // {P,P} & superc!=subc => false
4384     _prim_same_path,            // {P,P} & superc==subc => true
4385     _prim_1_path,               // {N,P} => false
4386     _ref_subtype_path,          // {N,N} & subtype check wins => true
4387     _both_ref_path,             // {N,N} & subtype check loses => false
4388     PATH_LIMIT
4389   };
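  // For example: int.class.isAssignableFrom(Integer.class) takes _prim_0_path
  // (false); int.class.isAssignableFrom(int.class) takes _prim_same_path
  // (true); Number.class.isAssignableFrom(int.class) takes _prim_1_path
  // (false); Number.class.isAssignableFrom(Integer.class) takes
  // _ref_subtype_path (true).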
4390 
4391   RegionNode* region = new RegionNode(PATH_LIMIT);
4392   RegionNode* prim_region = new RegionNode(2);
4393   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
4394   record_for_igvn(region);
4395   record_for_igvn(prim_region);
4396 
4397   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
4398   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4399   int class_klass_offset = java_lang_Class::klass_offset();
4400 
4401   // First null-check both mirrors and load each mirror's klass metaobject.
4402   int which_arg;
4403   for (which_arg = 0; which_arg <= 1; which_arg++) {
4404     Node* arg = args[which_arg];
4405     arg = null_check(arg);
4406     if (stopped())  break;
4407     args[which_arg] = arg;
4408 
4409     Node* p = basic_plus_adr(arg, class_klass_offset);
4410     Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
4411     klasses[which_arg] = _gvn.transform(kls);
4412   }
4413 
4414   // Having loaded both klasses, test each for null.
4415   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4416   for (which_arg = 0; which_arg <= 1; which_arg++) {
4417     Node* kls = klasses[which_arg];
4418     Node* null_ctl = top();
4419     kls = null_check_oop(kls, &null_ctl, never_see_null);
4420     if (which_arg == 0) {
4421       prim_region->init_req(1, null_ctl);
4422     } else {
4423       region->init_req(_prim_1_path, null_ctl);
4424     }
4425     if (stopped())  break;
4426     klasses[which_arg] = kls;
4427   }
4428 
4429   if (!stopped()) {
4430     // now we have two reference types, in klasses[0..1]
4431     Node* subk   = klasses[1];  // the argument to isAssignableFrom
4432     Node* superk = klasses[0];  // the receiver
4433     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));

4434     region->set_req(_ref_subtype_path, control());
4435   }
4436 
4437   // If both operands are primitive (both klasses null), then
4438   // we must return true when they are identical primitives.
4439   // It is convenient to test this after the first null klass check.
4440   // This path is also used if superc is a value mirror.
4441   set_control(_gvn.transform(prim_region));
4442   if (!stopped()) {
4443     // Since superc is primitive, make a guard for the superc==subc case.
4444     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4445     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4446     generate_fair_guard(bol_eq, region);
4447     if (region->req() == PATH_LIMIT+1) {
4448       // A guard was added.  If the added guard is taken, superc==subc.
4449       region->swap_edges(PATH_LIMIT, _prim_same_path);
4450       region->del_req(PATH_LIMIT);
4451     }
4452     region->set_req(_prim_0_path, control()); // Not equal after all.
4453   }
4454 
4455   // these are the only paths that produce 'true':
4456   phi->set_req(_prim_same_path,   intcon(1));
4457   phi->set_req(_ref_subtype_path, intcon(1));
4458 
4459   // pull together the cases:
4460   assert(region->req() == PATH_LIMIT, "sane region");
4461   for (uint i = 1; i < region->req(); i++) {
4462     Node* ctl = region->in(i);
4463     if (ctl == nullptr || ctl == top()) {
4464       region->set_req(i, top());
4465       phi   ->set_req(i, top());
4466     } else if (phi->in(i) == nullptr) {
4467       phi->set_req(i, intcon(0)); // all other paths produce 'false'
4468     }
4469   }
4470 
4471   set_control(_gvn.transform(region));
4472   set_result(_gvn.transform(phi));
4473   return true;
4474 }
4475 
4476 //---------------------generate_array_guard_common------------------------
4477 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {

4478 
4479   if (stopped()) {
4480     return nullptr;
4481   }
4482 









4483   // Like generate_guard, adds a new path onto the region.
4484   jint  layout_con = 0;
4485   Node* layout_val = get_layout_helper(kls, layout_con);
4486   if (layout_val == nullptr) {
4487     bool query = false;
4488     switch(kind) {
4489       case ObjectArray:    query = Klass::layout_helper_is_objArray(layout_con); break;
4490       case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
4491       case TypeArray:      query = Klass::layout_helper_is_typeArray(layout_con); break;
4492       case AnyArray:       query = Klass::layout_helper_is_array(layout_con); break;
4493       case NonArray:       query = !Klass::layout_helper_is_array(layout_con); break;
4494       default:
4495         ShouldNotReachHere();
4496     }
4497     if (!query) {
4498       return nullptr;                       // never a branch
4499     } else {                             // always a branch
4500       Node* always_branch = control();
4501       if (region != nullptr)
4502         region->add_req(always_branch);
4503       set_control(top());
4504       return always_branch;
4505     }
4506   }
4507   unsigned int value = 0;
4508   BoolTest::mask btest = BoolTest::illegal;
4509   switch(kind) {
4510     case ObjectArray:
4511     case NonObjectArray: {
4512       value = Klass::_lh_array_tag_obj_value;
4513       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4514       btest = (kind == ObjectArray) ? BoolTest::eq : BoolTest::ne;
4515       break;
4516     }
4517     case TypeArray: {
4518       value = Klass::_lh_array_tag_type_value;
4519       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4520       btest = BoolTest::eq;
4521       break;
4522     }
4523     case AnyArray:    value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
4524     case NonArray:    value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
4525     default:
4526       ShouldNotReachHere();
4527   }
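  // Array klasses have negative layout helpers (an array tag in the high bits)
  // while instance klasses have non-negative ones, so comparing the raw value
  // against _lh_neutral_value with lt/gt separates arrays from non-arrays
  // (see Klass::layout_helper in klass.hpp).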
4528   // Now test the correct condition.
4529   jint nval = (jint)value;



4530   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));



4531   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4532   return generate_fair_guard(bol, region);
4533 }
4534 
4535 //-----------------------inline_newNullRestrictedArray--------------------------
4536 // public static native Object[] newNullRestrictedArray(Class<?> componentType, int length);
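// Only intrinsified when the component type is a compile-time constant, loaded,
// and initialized inline type; otherwise we return false and let the native
// implementation handle the general case.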
4537 bool LibraryCallKit::inline_newNullRestrictedArray() {
4538   Node* componentType = argument(0);
4539   Node* length = argument(1);
4540 
4541   const TypeInstPtr* tp = _gvn.type(componentType)->isa_instptr();
4542   if (tp != nullptr) {
4543     ciInstanceKlass* ik = tp->instance_klass();
4544     if (ik == C->env()->Class_klass()) {
4545       ciType* t = tp->java_mirror_type();
4546       if (t != nullptr && t->is_inlinetype()) {
4547         ciArrayKlass* array_klass = ciArrayKlass::make(t, true);
4548         if (array_klass->is_loaded() && array_klass->element_klass()->as_inline_klass()->is_initialized()) {
4549           const TypeAryKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass, Type::trust_interfaces)->is_aryklassptr();
4550           array_klass_type = array_klass_type->cast_to_null_free();
4551           Node* obj = new_array(makecon(array_klass_type), length, 0, nullptr, false);  // no arguments to push
4552           set_result(obj);
4553           assert(gvn().type(obj)->is_aryptr()->is_null_free(), "must be null-free");
4554           return true;
4555         }
4556       }
4557     }
4558   }
4559   return false;
4560 }
4561 
4562 //-----------------------inline_native_newArray--------------------------
4563 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4564 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4565 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4566   Node* mirror;
4567   Node* count_val;
4568   if (uninitialized) {
4569     null_check_receiver();
4570     mirror    = argument(1);
4571     count_val = argument(2);
4572   } else {
4573     mirror    = argument(0);
4574     count_val = argument(1);
4575   }
4576 
4577   mirror = null_check(mirror);
4578   // If mirror or obj is dead, only null-path is taken.
4579   if (stopped())  return true;
4580 
4581   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4582   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4583   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);

4689   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4690   { PreserveReexecuteState preexecs(this);
4691     jvms()->set_should_reexecute(true);
4692 
4693     array_type_mirror = null_check(array_type_mirror);
4694     original          = null_check(original);
4695 
4696     // Check if a null path was taken unconditionally.
4697     if (stopped())  return true;
4698 
4699     Node* orig_length = load_array_length(original);
4700 
4701     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4702     klass_node = null_check(klass_node);
4703 
4704     RegionNode* bailout = new RegionNode(1);
4705     record_for_igvn(bailout);
4706 
4707     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4708     // Bail out if that is so.
4709     // An inline type array may have object fields that would require a
4710     // write barrier. Conservatively, go to the slow path.
4711     // TODO 8251971: Optimize for the case when flat src/dst are later found
4712     // to not contain oops (i.e., move this check to the macro expansion phase).
4713     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4714     const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
4715     const TypeKlassPtr* tklass = _gvn.type(klass_node)->is_klassptr();
4716     bool exclude_flat = UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
4717                         // Can src array be flat and contain oops?
4718                         (orig_t == nullptr || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
4719                         // Can dest array be flat and contain oops?
4720                         tklass->can_be_inline_array() && (!tklass->is_flat() || tklass->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->as_inline_klass()->contains_oops());
4721     Node* not_objArray = exclude_flat ? generate_non_objArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
4722     if (not_objArray != nullptr) {
4723       // Improve the klass node's type from the new optimistic assumption:
4724       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4725       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
4726       Node* cast = new CastPPNode(control(), klass_node, akls);
4727       klass_node = _gvn.transform(cast);
4728     }
4729 
4730     // Bail out if either start or end is negative.
4731     generate_negative_guard(start, bailout, &start);
4732     generate_negative_guard(end,   bailout, &end);
4733 
4734     Node* length = end;
4735     if (_gvn.type(start) != TypeInt::ZERO) {
4736       length = _gvn.transform(new SubINode(end, start));
4737     }
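    // (In Arrays.copyOfRange terms this computes newLength = to - from.)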
4738 
4739     // Bail out if length is negative (i.e., if start > end).
4740     // Without this, new_array would throw a
4741     // NegativeArraySizeException, but an IllegalArgumentException is what
4742     // should be thrown.
4743     generate_negative_guard(length, bailout, &length);
4744 
4745     // Handle inline type arrays
4746     bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
4747     if (!stopped()) {
4748       // TODO JDK-8329224
4749       if (!orig_t->is_null_free()) {
4750         // Not statically known to be null free, add a check
4751         generate_fair_guard(null_free_array_test(original), bailout);
4752       }
4753       orig_t = _gvn.type(original)->isa_aryptr();
4754       if (orig_t != nullptr && orig_t->is_flat()) {
4755         // Src is flat, check that dest is flat as well
4756         if (exclude_flat) {
4757           // Dest can't be flat, bail out
4758           bailout->add_req(control());
4759           set_control(top());
4760         } else {
4761           generate_fair_guard(flat_array_test(klass_node, /* flat = */ false), bailout);
4762         }
4763       } else if (UseFlatArray && (orig_t == nullptr || !orig_t->is_not_flat()) &&
4764                  // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
4765                  ((!tklass->is_flat() && tklass->can_be_inline_array()) || !can_validate)) {
4766         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
4767         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
4768         generate_fair_guard(flat_array_test(load_object_klass(original)), bailout);
4769         if (orig_t != nullptr) {
4770           orig_t = orig_t->cast_to_not_flat();
4771           original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
4772         }
4773       }
4774       if (!can_validate) {
4775         // No validation. The subtype check emitted at macro expansion time will not go to the slow
4776         // path but call checkcast_arraycopy, which cannot handle flat/null-free inline type arrays.
4777         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
4778         generate_fair_guard(flat_array_test(klass_node), bailout);
4779         generate_fair_guard(null_free_array_test(original), bailout);
4780       }
4781     }
4782 
4783     // Bail out if start is larger than the original length
4784     Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4785     generate_negative_guard(orig_tail, bailout, &orig_tail);
4786 
4787     if (bailout->req() > 1) {
4788       PreserveJVMState pjvms(this);
4789       set_control(_gvn.transform(bailout));
4790       uncommon_trap(Deoptimization::Reason_intrinsic,
4791                     Deoptimization::Action_maybe_recompile);
4792     }
4793 
4794     if (!stopped()) {
4795       // How many elements will we copy from the original?
4796       // The answer is MinI(orig_tail, length).
4797       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4798 
4799       // Generate a direct call to the right arraycopy function(s).
4800       // We know the copy is disjoint but we might not know if the
4801       // oop stores need checking.
4802       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).

4808       // to the copyOf to be validated, including that the copy to the
4809       // new array won't trigger an ArrayStoreException. That subtype
4810       // check can be optimized if we know something on the type of
4811       // the input array from type speculation.
4812       if (_gvn.type(klass_node)->singleton()) {
4813         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4814         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4815 
4816         int test = C->static_subtype_check(superk, subk);
4817         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4818           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4819           if (t_original->speculative_type() != nullptr) {
4820             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4821           }
4822         }
4823       }
4824 
4825       bool validated = false;
4826       // Reason_class_check rather than Reason_intrinsic because we
4827       // want to intrinsify even if this traps.
4828       if (can_validate) {
4829         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4830 
4831         if (not_subtype_ctrl != top()) {
4832           PreserveJVMState pjvms(this);
4833           set_control(not_subtype_ctrl);
4834           uncommon_trap(Deoptimization::Reason_class_check,
4835                         Deoptimization::Action_make_not_entrant);
4836           assert(stopped(), "Should be stopped");
4837         }
4838         validated = true;
4839       }
4840 
4841       if (!stopped()) {
4842         newcopy = new_array(klass_node, length, 0);  // no arguments to push
4843 
4844         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
4845                                                 load_object_klass(original), klass_node);
4846         if (!is_copyOfRange) {
4847           ac->set_copyof(validated);
4848         } else {

4894 
4895 //-----------------------generate_method_call----------------------------
4896 // Use generate_method_call to make a slow-call to the real
4897 // method if the fast path fails.  An alternative would be to
4898 // use a stub like OptoRuntime::slow_arraycopy_Java.
4899 // This only works for expanding the current library call,
4900 // not another intrinsic.  (E.g., don't use this for making an
4901 // arraycopy call inside of the copyOf intrinsic.)
4902 CallJavaNode*
4903 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4904   // When compiling the intrinsic method itself, do not use this technique.
4905   guarantee(callee() != C->method(), "cannot make slow-call to self");
4906 
4907   ciMethod* method = callee();
4908   // ensure the JVMS we have will be correct for this call
4909   guarantee(method_id == method->intrinsic_id(), "must match");
4910 
4911   const TypeFunc* tf = TypeFunc::make(method);
4912   if (res_not_null) {
4913     assert(tf->return_type() == T_OBJECT, "");
4914     const TypeTuple* range = tf->range_cc();
4915     const Type** fields = TypeTuple::fields(range->cnt());
4916     fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4917     const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4918     tf = TypeFunc::make(tf->domain_cc(), new_range);
4919   }
4920   CallJavaNode* slow_call;
4921   if (is_static) {
4922     assert(!is_virtual, "");
4923     slow_call = new CallStaticJavaNode(C, tf,
4924                            SharedRuntime::get_resolve_static_call_stub(), method);
4925   } else if (is_virtual) {
4926     assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4927     int vtable_index = Method::invalid_vtable_index;
4928     if (UseInlineCaches) {
4929       // Suppress the vtable call
4930     } else {
4931       // hashCode and clone are not miranda methods,
4932       // so the vtable index is fixed.
4933       // No need to use the linkResolver to get it.
4934        vtable_index = method->vtable_index();
4935        assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4936               "bad index %d", vtable_index);
4937     }
4938     slow_call = new CallDynamicJavaNode(tf,

4955   set_edges_for_java_call(slow_call);
4956   return slow_call;
4957 }
4958 
4959 
4960 /**
4961  * Build special case code for calls to hashCode on an object. This call may
4962  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4963  * slightly different code.
4964  */
4965 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4966   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4967   assert(!(is_virtual && is_static), "either virtual, special, or static");
4968 
4969   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4970 
4971   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4972   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4973   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4974   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4975   Node* obj = argument(0);
4976 
4977   // Don't intrinsify hashCode on inline types for now.
4978   // The "is locked" runtime check below also serves as an inline type check and goes to the slow path.
4979   if (gvn().type(obj)->is_inlinetypeptr()) {
4980     return false;
4981   }
4982 
4983   if (!is_static) {
4984     // Check for hashing null object
4985     obj = null_check_receiver();
4986     if (stopped())  return true;        // unconditionally null
4987     result_reg->init_req(_null_path, top());
4988     result_val->init_req(_null_path, top());
4989   } else {
4990     // Do a null check, and return zero if null.
4991     // System.identityHashCode(null) == 0

4992     Node* null_ctl = top();
4993     obj = null_check_oop(obj, &null_ctl);
4994     result_reg->init_req(_null_path, null_ctl);
4995     result_val->init_req(_null_path, _gvn.intcon(0));
4996   }
4997 
4998   // Unconditionally null?  Then return right away.
4999   if (stopped()) {
5000     set_control( result_reg->in(_null_path));
5001     if (!stopped())
5002       set_result(result_val->in(_null_path));
5003     return true;
5004   }
5005 
5006   // We only go to the fast case code if we pass a number of guards.  The
5007   // paths which do not pass are accumulated in the slow_region.
5008   RegionNode* slow_region = new RegionNode(1);
5009   record_for_igvn(slow_region);
5010 
5011   // If this is a virtual call, we generate a funny guard.  We pull out
5012   // the vtable entry corresponding to hashCode() from the target object.
5013   // If the target method which we are calling happens to be the native
5014   // Object hashCode() method, we pass the guard.  We do not need this
5015   // guard for non-virtual calls -- the caller is known to be the native
5016   // Object hashCode().
5017   if (is_virtual) {
5018     // After null check, get the object's klass.
5019     Node* obj_klass = load_object_klass(obj);
5020     generate_virtual_guard(obj_klass, slow_region);
5021   }
5022 
5023   // Get the header out of the object, use LoadMarkNode when available
5024   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
5025   // The control of the load must be null. Otherwise, the load can move before
5026   // the null check after castPP removal.
5027   Node* no_ctrl = nullptr;
5028   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
5029 
5030   if (!UseObjectMonitorTable) {
5031     // Test the header to see if it is safe to read w.r.t. locking.
5032     // This also serves as a guard against inline types.
5033     Node *lock_mask      = _gvn.MakeConX(markWord::inline_type_mask_in_place);
5034     Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
5035     if (LockingMode == LM_LIGHTWEIGHT) {
5036       Node *monitor_val   = _gvn.MakeConX(markWord::monitor_value);
5037       Node *chk_monitor   = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
5038       Node *test_monitor  = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
5039 
5040       generate_slow_guard(test_monitor, slow_region);
5041     } else {
5042       Node *unlocked_val      = _gvn.MakeConX(markWord::unlocked_value);
5043       Node *chk_unlocked      = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
5044       Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
5045 
5046       generate_slow_guard(test_not_unlocked, slow_region);
5047     }
5048   }
5049 
5050   // Get the hash value and check to see that it has been properly assigned.
5051   // We depend on hash_mask being at most 32 bits and avoid the use of
5052   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
5053   // vm: see markWord.hpp.

5088     // this->control() comes from set_results_for_java_call
5089     result_reg->init_req(_slow_path, control());
5090     result_val->init_req(_slow_path, slow_result);
5091     result_io  ->set_req(_slow_path, i_o());
5092     result_mem ->set_req(_slow_path, reset_memory());
5093   }
5094 
5095   // Return the combined state.
5096   set_i_o(        _gvn.transform(result_io)  );
5097   set_all_memory( _gvn.transform(result_mem));
5098 
5099   set_result(result_reg, result_val);
5100   return true;
5101 }
5102 
5103 //---------------------------inline_native_getClass----------------------------
5104 // public final native Class<?> java.lang.Object.getClass();
5105 //
5106 // Build special case code for calls to getClass on an object.
5107 bool LibraryCallKit::inline_native_getClass() {
5108   Node* obj = argument(0);
5109   if (obj->is_InlineType()) {
5110     const Type* t = _gvn.type(obj);
5111     if (t->maybe_null()) {
5112       null_check(obj);
5113     }
5114     set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
5115     return true;
5116   }
5117   obj = null_check_receiver();
5118   if (stopped())  return true;
5119   set_result(load_mirror_from_klass(load_object_klass(obj)));
5120   return true;
5121 }
5122 
5123 //-----------------inline_native_Reflection_getCallerClass---------------------
5124 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
5125 //
5126 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
5127 //
5128 // NOTE: This code must perform the same logic as JVM_GetCallerClass
5129 // in that it must skip particular security frames and checks for
5130 // caller-sensitive methods.
5131 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
5132 #ifndef PRODUCT
5133   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
5134     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
5135   }
5136 #endif
5137 

5449     dst_type = _gvn.type(dst_addr)->is_ptr(); // narrow out memory
5450 
5451     flags |= RC_NARROW_MEM; // narrow in memory
5452   }
5453 
5454   // Call it.  Note that the length argument is not scaled.
5455   make_runtime_call(flags,
5456                     OptoRuntime::make_setmemory_Type(),
5457                     StubRoutines::unsafe_setmemory(),
5458                     "unsafe_setmemory",
5459                     dst_type,
5460                     dst_addr, size XTOP, byte);
5461 
5462   store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, MemNode::unordered);
5463 
5464   return true;
5465 }
5466 
5467 #undef XTOP
5468 
5469 //----------------------inline_unsafe_isFlatArray------------------------
5470 // public native boolean Unsafe.isFlatArray(Class<?> arrayClass);
5471 // This intrinsic exploits assumptions made by the native implementation
5472 // (arrayClass is neither null nor primitive) to avoid unnecessary null checks.
5473 bool LibraryCallKit::inline_unsafe_isFlatArray() {
5474   Node* cls = argument(1);
5475   Node* p = basic_plus_adr(cls, java_lang_Class::klass_offset());
5476   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p,
5477                                                  TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT));
5478   Node* result = flat_array_test(kls);
5479   set_result(result);
5480   return true;
5481 }
5482 
5483 //------------------------copy_to_clone-----------------------------------
5484 // Helper function for inline_native_clone.
5485 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5486   assert(obj_size != nullptr, "");
5487   Node* raw_obj = alloc_obj->in(1);
5488   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5489 
5490   AllocateNode* alloc = nullptr;
5491   if (ReduceBulkZeroing &&
5492       // If we are implementing an array clone without knowing its source type
5493       // (can happen when compiling the array-guarded branch of a reflective
5494       // Object.clone() invocation), initialize the array within the allocation.
5495       // This is needed because some GCs (e.g. ZGC) might fall back in this case
5496       // to a runtime clone call that assumes fully initialized source arrays.
5497       (!is_array || obj->get_ptr_type()->isa_aryptr() != nullptr)) {
5498     // We will be completely responsible for initializing this object -
5499     // mark Initialize node as complete.
5500     alloc = AllocateNode::Ideal_allocation(alloc_obj);
5501     // The object was just allocated - there should not be any stores yet!
5502     guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");

5533 //  not cloneable or finalizer => slow path to out-of-line Object.clone
5534 //
5535 // The general case has two steps, allocation and copying.
5536 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5537 //
5538 // Copying also has two cases, oop arrays and everything else.
5539 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5540 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5541 //
5542 // These steps fold up nicely if and when the cloned object's klass
5543 // can be sharply typed as an object array, a type array, or an instance.
5544 //
5545 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5546   PhiNode* result_val;
5547 
5548   // Set the reexecute bit for the interpreter to reexecute
5549   // the bytecode that invokes Object.clone if deoptimization happens.
5550   { PreserveReexecuteState preexecs(this);
5551     jvms()->set_should_reexecute(true);
5552 
5553     Node* obj = argument(0);
5554     obj = null_check_receiver();
5555     if (stopped())  return true;
5556 
5557     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5558     if (obj_type->is_inlinetypeptr()) {
5559       // If the object to clone is an inline type, we can simply return it (i.e., a nop) since inline types have
5560       // no identity.
5561       set_result(obj);
5562       return true;
5563     }
5564 
5565     // If we are going to clone an instance, we need its exact type to
5566     // know the number and types of fields to convert the clone to
5567     // loads/stores. Maybe a speculative type can help us.
5568     if (!obj_type->klass_is_exact() &&
5569         obj_type->speculative_type() != nullptr &&
5570         obj_type->speculative_type()->is_instance_klass() &&
5571         !obj_type->speculative_type()->is_inlinetype()) {
5572       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5573       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5574           !spec_ik->has_injected_fields()) {
5575         if (!obj_type->isa_instptr() ||
5576             obj_type->is_instptr()->instance_klass()->has_subklass()) {
5577           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5578         }
5579       }
5580     }
5581 
5582     // Conservatively insert a memory barrier on all memory slices.
5583     // Do not let writes into the original float below the clone.
5584     insert_mem_bar(Op_MemBarCPUOrder);
5585 
5586     // paths into result_reg:
5587     enum {
5588       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
5589       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
5590       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
5591       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
5592       PATH_LIMIT
5593     };
5594     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5595     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5596     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
5597     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5598     record_for_igvn(result_reg);
5599 
5600     Node* obj_klass = load_object_klass(obj);
5601     // We only go to the fast case code if we pass a number of guards.
5602     // The paths which do not pass are accumulated in the slow_region.
5603     RegionNode* slow_region = new RegionNode(1);
5604     record_for_igvn(slow_region);
5605 
5606     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
5607     if (array_ctl != nullptr) {
5608       // It's an array.
5609       PreserveJVMState pjvms(this);
5610       set_control(array_ctl);



5611 
5612       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5613       const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
5614       if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
5615           obj_type->can_be_inline_array() &&
5616           (ary_ptr == nullptr || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
5617         // Flat inline type array may have object field that would require a
5618         // write barrier. Conservatively, go to slow path.
5619         generate_fair_guard(flat_array_test(obj_klass), slow_region);













5620       }







5621 
5622       if (!stopped()) {
5623         Node* obj_length = load_array_length(obj);
5624         Node* array_size = nullptr; // Size of the array without object alignment padding.
5625         Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5626 
5627         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5628         if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5629           // If it is an oop array, it requires very special treatment,
5630           // because gc barriers are required when accessing the array.
5631           Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5632           if (is_obja != nullptr) {
5633             PreserveJVMState pjvms2(this);
5634             set_control(is_obja);
5635             // Generate a direct call to the right arraycopy function(s).
5636             // Clones are always tightly coupled.
5637             ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5638             ac->set_clone_oop_array();
5639             Node* n = _gvn.transform(ac);
5640             assert(n == ac, "cannot disappear");
5641             ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5642 
5643             result_reg->init_req(_objArray_path, control());
5644             result_val->init_req(_objArray_path, alloc_obj);
5645             result_i_o ->set_req(_objArray_path, i_o());
5646             result_mem ->set_req(_objArray_path, reset_memory());
5647           }
5648         }
5649         // Otherwise, there are no barriers to worry about.
5650         // (We can dispense with card marks if we know the allocation
5651         //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
5652         //  causes the non-eden paths to take compensating steps to
5653         //  simulate a fresh allocation, so that no further
5654         //  card marks are required in compiled code to initialize
5655         //  the object.)
5656 
5657         if (!stopped()) {
5658           copy_to_clone(obj, alloc_obj, array_size, true);
5659 
5660           // Present the results of the copy.
5661           result_reg->init_req(_array_path, control());
5662           result_val->init_req(_array_path, alloc_obj);
5663           result_i_o ->set_req(_array_path, i_o());
5664           result_mem ->set_req(_array_path, reset_memory());
5665         }
5666       }
5667     }
5668 




5669     if (!stopped()) {
5670       // It's an instance (we did array above).  Make the slow-path tests.
5671       // If this is a virtual call, we generate a funny guard.  We grab
5672       // the vtable entry corresponding to clone() from the target object.
5673       // If the target method which we are calling happens to be the
5674       // Object clone() method, we pass the guard.  We do not need this
5675       // guard for non-virtual calls; the caller is known to be the native
5676       // Object clone().
5677       if (is_virtual) {
5678         generate_virtual_guard(obj_klass, slow_region);
5679       }
5680 
5681       // The object must be easily cloneable and must not have a finalizer.
5682       // Both of these conditions may be checked in a single test.
5683       // We could optimize the test further, but we don't care.
5684       generate_misc_flags_guard(obj_klass,
5685                                 // Test both conditions:
5686                                 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
5687                                 // Must be cloneable but not finalizer:
5688                                 KlassFlags::_misc_is_cloneable_fast,

5780         set_jvms(sfpt->jvms());
5781         _reexecute_sp = jvms()->sp();
5782 
5783         return saved_jvms;
5784       }
5785     }
5786   }
5787   return nullptr;
5788 }
5789 
5790 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5791 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5792 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5793   JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5794   uint size = alloc->req();
5795   SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5796   old_jvms->set_map(sfpt);
5797   for (uint i = 0; i < size; i++) {
5798     sfpt->init_req(i, alloc->in(i));
5799   }
5800   int adjustment = 1;
5801   const TypeAryKlassPtr* ary_klass_ptr = alloc->in(AllocateNode::KlassNode)->bottom_type()->is_aryklassptr();
5802   if (ary_klass_ptr->is_null_free()) {
5803     // A null-free, tightly coupled array allocation can only come from LibraryCallKit::inline_newNullRestrictedArray
5804     // which requires both the component type and the array length on stack for re-execution. Re-create and push
5805     // the component type.
5806     ciArrayKlass* klass = ary_klass_ptr->exact_klass()->as_array_klass();
5807     ciInstance* instance = klass->component_mirror_instance();
5808     const TypeInstPtr* t_instance = TypeInstPtr::make(instance);
5809     sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), makecon(t_instance));
5810     adjustment++;
5811   }
5812   // re-push array length for deoptimization
5813   sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment - 1, alloc->in(AllocateNode::ALength));
5814   old_jvms->set_sp(old_jvms->sp() + adjustment);
5815   old_jvms->set_monoff(old_jvms->monoff() + adjustment);
5816   old_jvms->set_scloff(old_jvms->scloff() + adjustment);
5817   old_jvms->set_endoff(old_jvms->endoff() + adjustment);
5818   old_jvms->set_should_reexecute(true);
5819 
5820   sfpt->set_i_o(map()->i_o());
5821   sfpt->set_memory(map()->memory());
5822   sfpt->set_control(map()->control());
5823   return sfpt;
5824 }
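     // Illustrative sketch (added, assumed): the re-pushed operands grow the cloned
     // JVMState's expression stack so the interpreter can re-execute the allocation:
     //
     //   plain array:     [ locals | stack ]  ->  [ locals | stack | length ]            (sp += 1)
     //   null-free array: [ locals | stack ]  ->  [ locals | stack | mirror | length ]   (sp += 2)
     //
     // monoff/scloff/endoff are shifted by the same adjustment so the monitor and
     // scalar-replacement sections still line up behind the grown stack.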
5825 
5826 // In case of a deoptimization, we restart execution at the
5827 // allocation, allocating a new array. We would leave an uninitialized
5828 // array in the heap that GCs wouldn't expect. Move the allocation
5829 // after the traps so we don't allocate the array if we
5830 // deoptimize. This is possible because tightly_coupled_allocation()
5831 // guarantees there's no observer of the allocated array at this point
5832 // and the control flow is simple enough.
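     // Illustrative example (added, not from this file): the shape handled here is
     //
     //   int[] dest = new int[n];                 // tightly coupled allocation
     //   System.arraycopy(src, 0, dest, 0, n);    // intrinsified copy
     //
     // Any guard emitted between the allocation and the copy that deoptimizes would
     // otherwise leave the freshly allocated, uninitialized 'dest' behind; moving
     // the allocation below the guards means it only happens if no trap is taken.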
5833 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5834                                                     int saved_reexecute_sp, uint new_idx) {
5835   if (saved_jvms_before_guards != nullptr && !stopped()) {
5836     replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5837 
5838     assert(alloc != nullptr, "only with a tightly coupled allocation");
5839     // restore JVM state to the state at the arraycopy
5840     saved_jvms_before_guards->map()->set_control(map()->control());
5841     assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5842     assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5843     // If we've improved the types of some nodes (e.g. from a null check) while
5844     // emitting the guards, propagate them to the current state
5845     map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5846     set_jvms(saved_jvms_before_guards);
5847     _reexecute_sp = saved_reexecute_sp;
5848 
5849     // Remove the allocation from above the guards
5850     CallProjections* callprojs = alloc->extract_projections(true);

5851     InitializeNode* init = alloc->initialization();
5852     Node* alloc_mem = alloc->in(TypeFunc::Memory);
5853     C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5854     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5855 
5856     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5857     // the allocation (i.e. is only valid if the allocation succeeds):
5858     // 1) Replace the CastIINode with the AllocateArrayNode's length here
5859     // 2) Re-create the CastIINode once the allocation has moved, at the end of this method (see below)
5860     //
5861     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
5862     // a new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy)
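         // Illustrative sketch (added, assumed): the rewiring performed below is
         //
         //   before:  init ctrl --> CastII(alloc length) --> length users
         //   after:   length users --> alloc->Ideal_length() directly
         //
         // with a fresh CastII, pinned below the moved allocation, created at the
         // end of this method.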
5863     Node* init_control = init->proj_out(TypeFunc::Control);
5864     Node* alloc_length = alloc->Ideal_length();
5865 #ifdef ASSERT
5866     Node* prev_cast = nullptr;
5867 #endif
5868     for (uint i = 0; i < init_control->outcnt(); i++) {
5869       Node* init_out = init_control->raw_out(i);
5870       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5871 #ifdef ASSERT
5872         if (prev_cast == nullptr) {
5873           prev_cast = init_out;

5875           if (prev_cast->cmp(*init_out) == false) {
5876             prev_cast->dump();
5877             init_out->dump();
5878             assert(false, "not equal CastIINode");
5879           }
5880         }
5881 #endif
5882         C->gvn_replace_by(init_out, alloc_length);
5883       }
5884     }
5885     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5886 
5887     // move the allocation here (after the guards)
5888     _gvn.hash_delete(alloc);
5889     alloc->set_req(TypeFunc::Control, control());
5890     alloc->set_req(TypeFunc::I_O, i_o());
5891     Node *mem = reset_memory();
5892     set_all_memory(mem);
5893     alloc->set_req(TypeFunc::Memory, mem);
5894     set_control(init->proj_out_or_null(TypeFunc::Control));
5895     set_i_o(callprojs->fallthrough_ioproj);
5896 
5897     // Update memory as done in GraphKit::set_output_for_allocation()
5898     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5899     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5900     if (ary_type->isa_aryptr() && length_type != nullptr) {
5901       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5902     }
5903     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5904     int            elemidx  = C->get_alias_index(telemref);
5905     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5906     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5907 
5908     Node* allocx = _gvn.transform(alloc);
5909     assert(allocx == alloc, "where has the allocation gone?");
5910     assert(dest->is_CheckCastPP(), "not an allocation result?");
5911 
5912     _gvn.hash_delete(dest);
5913     dest->set_req(0, control());
5914     Node* destx = _gvn.transform(dest);
5915     assert(destx == dest, "where has the allocation result gone?");

6213         top_src  = src_type->isa_aryptr();
6214         has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
6215         src_spec = true;
6216       }
6217       if (!has_dest) {
6218         dest = maybe_cast_profiled_obj(dest, dest_k, true);
6219         dest_type  = _gvn.type(dest);
6220         top_dest  = dest_type->isa_aryptr();
6221         has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
6222         dest_spec = true;
6223       }
6224     }
6225   }
6226 
6227   if (has_src && has_dest && can_emit_guards) {
6228     BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
6229     BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
6230     if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
6231     if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
6232 
6233     if (src_elem == dest_elem && top_src->is_flat() == top_dest->is_flat() && src_elem == T_OBJECT) {
6234       // If both arrays are object arrays, then having the exact types
6235       // for both removes the need for a subtype check at runtime
6236       // before the call and may make it possible to pick a faster copy
6237       // routine (without a subtype check on every element)
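           // Illustrative example (added, assumed): if profiling shows src and dest
           // are both exactly String[], the subtype relation is proven once at
           // compile time and the copy can use a routine without a per-element
           // store check.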
6238       // Do we have the exact type of src?
6239       bool could_have_src = src_spec;
6240       // Do we have the exact type of dest?
6241       bool could_have_dest = dest_spec;
6242       ciKlass* src_k = nullptr;
6243       ciKlass* dest_k = nullptr;
6244       if (!src_spec) {
6245         src_k = src_type->speculative_type_not_null();
6246         if (src_k != nullptr && src_k->is_array_klass()) {
6247           could_have_src = true;
6248         }
6249       }
6250       if (!dest_spec) {
6251         dest_k = dest_type->speculative_type_not_null();
6252         if (dest_k != nullptr && dest_k->is_array_klass()) {
6253           could_have_dest = true;
6254         }
6255       }
6256       if (could_have_src && could_have_dest) {
6257         // If we can have both exact types, emit the missing guards
6258         if (could_have_src && !src_spec) {
6259           src = maybe_cast_profiled_obj(src, src_k, true);
6260           src_type = _gvn.type(src);
6261           top_src = src_type->isa_aryptr();
6262         }
6263         if (could_have_dest && !dest_spec) {
6264           dest = maybe_cast_profiled_obj(dest, dest_k, true);
6265           dest_type = _gvn.type(dest);
6266           top_dest = dest_type->isa_aryptr();
6267         }
6268       }
6269     }
6270   }
6271 
6272   ciMethod* trap_method = method();
6273   int trap_bci = bci();
6274   if (saved_jvms_before_guards != nullptr) {
6275     trap_method = alloc->jvms()->method();
6276     trap_bci = alloc->jvms()->bci();
6277   }
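       // Illustrative note (added, assumed): when the allocation was moved below
       // the guards, traps re-execute the allocation bytecode, so "too many traps"
       // must be counted against the allocation's method/bci rather than the
       // arraycopy call site.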
6278 
6279   bool negative_length_guard_generated = false;
6280 
6281   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
6282       can_emit_guards && !src->is_top() && !dest->is_top()) {

6283     // validate arguments: enables transformation of the ArrayCopyNode
6284     validated = true;
6285 
6286     RegionNode* slow_region = new RegionNode(1);
6287     record_for_igvn(slow_region);
6288 
6289     // (1) src and dest are arrays.
6290     generate_non_array_guard(load_object_klass(src), slow_region);
6291     generate_non_array_guard(load_object_klass(dest), slow_region);
6292 
6293     // (2) src and dest arrays must have elements of the same BasicType
6294     // done at macro expansion or at Ideal transformation time
6295 
6296     // (4) src_offset must not be negative.
6297     generate_negative_guard(src_offset, slow_region);
6298 
6299     // (5) dest_offset must not be negative.
6300     generate_negative_guard(dest_offset, slow_region);
6301 
6302     // (7) src_offset + length must not exceed length of src.

6305                          slow_region);
6306 
6307     // (8) dest_offset + length must not exceed length of dest.
6308     generate_limit_guard(dest_offset, length,
6309                          load_array_length(dest),
6310                          slow_region);
6311 
6312     // (6) length must not be negative.
6313     // This is also checked in generate_arraycopy() during macro expansion, but
6314     // we must check it here as well for the case where the ArrayCopyNode will
6315     // be eliminated by Escape Analysis.
6316     if (EliminateAllocations) {
6317       generate_negative_guard(length, slow_region);
6318       negative_length_guard_generated = true;
6319     }
6320 
6321     // (9) each element of an oop array must be assignable
6322     Node* dest_klass = load_object_klass(dest);
6323     if (src != dest) {
6324       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
6325       slow_region->add_req(not_subtype_ctrl);
6326     }
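         // Illustrative note (added, assumed): at runtime this subtype check sends
         // e.g. an Object[] -> Integer[] copy to slow_region, while an
         // Integer[] -> Number[] copy passes and needs no per-element store check.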
6327 
6328     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
6329     const Type* toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
6330     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
6331     src_type = _gvn.type(src);
6332     top_src  = src_type->isa_aryptr();
6333 
6334     // Handle flat inline type arrays (null-free arrays are handled by the subtype check above)
6335     if (!stopped() && UseFlatArray) {
6336       // If dest is flat, src must be flat as well (guaranteed by src <: dest check). Handle flat src here.
6337       assert(top_dest == nullptr || !top_dest->is_flat() || top_src->is_flat(), "src array must be flat");
6338       if (top_src != nullptr && top_src->is_flat()) {
6339         // Src is flat, check that dest is flat as well
6340         if (top_dest != nullptr && !top_dest->is_flat()) {
6341           generate_fair_guard(flat_array_test(dest_klass, /* flat = */ false), slow_region);
6342           // Since dest is flat and src <: dest, dest must have the same type as src.
6343           top_dest = top_src->cast_to_exactness(false);
6344           assert(top_dest->is_flat(), "dest must be flat");
6345           dest = _gvn.transform(new CheckCastPPNode(control(), dest, top_dest));
6346         }
6347       } else if (top_src == nullptr || !top_src->is_not_flat()) {
6348         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
6349         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
6350         assert(top_dest == nullptr || !top_dest->is_flat(), "dest array must not be flat");
6351         generate_fair_guard(flat_array_test(src), slow_region);
6352         if (top_src != nullptr) {
6353           top_src = top_src->cast_to_not_flat();
6354           src = _gvn.transform(new CheckCastPPNode(control(), src, top_src));
6355         }
6356       }
6357     }
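         // Illustrative summary (added, assumed): after these guards the fast path
         // never mixes layouts: either both arrays are flat, or both are not flat,
         // or the copy has been routed to slow_region.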
6358 
6359     {
6360       PreserveJVMState pjvms(this);
6361       set_control(_gvn.transform(slow_region));
6362       uncommon_trap(Deoptimization::Reason_intrinsic,
6363                     Deoptimization::Action_make_not_entrant);
6364       assert(stopped(), "Should be stopped");
6365     }




6366     arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
6367   }
6368 
6369   if (stopped()) {
6370     return true;
6371   }
6372 
6373   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6374                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
6375                                           // so the compiler has a chance to eliminate them: during macro expansion,
6376                                           // we have to set their control (CastPP nodes are eliminated).
6377                                           load_object_klass(src), load_object_klass(dest),
6378                                           load_array_length(src), load_array_length(dest));
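       // Illustrative note (added, assumed): materializing the klass and length
       // loads here, while the guards' control is still available, gives IGVN a
       // chance to fold them; by macro expansion time the CastPP nodes they would
       // hang off have been eliminated.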
6379 
6380   ac->set_arraycopy(validated);
6381 
6382   Node* n = _gvn.transform(ac);
6383   if (n == ac) {
6384     ac->connect_outputs(this);
6385   } else {