src/hotspot/share/opto/library_call.cpp

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "ci/ciUtilities.inline.hpp"
  28 #include "classfile/vmIntrinsics.hpp"
  29 #include "compiler/compileBroker.hpp"
  30 #include "compiler/compileLog.hpp"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "jfr/support/jfrIntrinsics.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/klass.inline.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/c2compiler.hpp"
  39 #include "opto/castnode.hpp"
  40 #include "opto/cfgnode.hpp"
  41 #include "opto/convertnode.hpp"
  42 #include "opto/countbitsnode.hpp"
  43 #include "opto/idealKit.hpp"
  44 #include "opto/library_call.hpp"
  45 #include "opto/mathexactnode.hpp"
  46 #include "opto/mulnode.hpp"

 305   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 306   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 307   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 308   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 309 
 310   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 311   case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);
 312 
 313   case vmIntrinsics::_vectorizedHashCode:       return inline_vectorizedHashCode();
 314 
 315   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 316   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 317   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 318   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 319 
 320   case vmIntrinsics::_compressStringC:
 321   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 322   case vmIntrinsics::_inflateStringC:
 323   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 324 
 325   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 326   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 327   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 328   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 329   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 330   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 331   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 332   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 333   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);
 334 
 335   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 336   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 337   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 338   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 339   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 340   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 341   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 342   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 343   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);
 344 
 345   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 346   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 347   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 348   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 349   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 350   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 351   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 352   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 353   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 354 
 355   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 356   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 357   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 358   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 359   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 360   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 361   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 362   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 363   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 487                                                                                          "notifyJvmtiEnd", false, true);
 488   case vmIntrinsics::_notifyJvmtiVThreadMount:   return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
 489                                                                                          "notifyJvmtiMount", false, false);
 490   case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
 491                                                                                          "notifyJvmtiUnmount", false, false);
 492   case vmIntrinsics::_notifyJvmtiVThreadHideFrames: return inline_native_notify_jvmti_hide();
 493 #endif
 494 
 495 #ifdef JFR_HAVE_INTRINSICS
 496   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
 497   case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
 498   case vmIntrinsics::_jvm_commit:               return inline_native_jvm_commit();
 499 #endif
 500   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 501   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 502   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 503   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 504   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 505   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 506   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 507   case vmIntrinsics::_getLength:                return inline_native_getLength();
 508   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 509   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 510   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 511   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 512   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 513   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 514   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 515 
 516   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 517   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 518 
 519   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 520 
 521   case vmIntrinsics::_isInstance:
 522   case vmIntrinsics::_getModifiers:
 523   case vmIntrinsics::_isInterface:
 524   case vmIntrinsics::_isArray:
 525   case vmIntrinsics::_isPrimitive:
 526   case vmIntrinsics::_isHidden:
 527   case vmIntrinsics::_getSuperclass:
 528   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 529 
 530   case vmIntrinsics::_floatToRawIntBits:
 531   case vmIntrinsics::_floatToIntBits:
 532   case vmIntrinsics::_intBitsToFloat:
 533   case vmIntrinsics::_doubleToRawLongBits:
 534   case vmIntrinsics::_doubleToLongBits:
 535   case vmIntrinsics::_longBitsToDouble:
 536   case vmIntrinsics::_floatToFloat16:
 537   case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());
 538 
 539   case vmIntrinsics::_floatIsFinite:
 540   case vmIntrinsics::_floatIsInfinite:
 541   case vmIntrinsics::_doubleIsFinite:
 542   case vmIntrinsics::_doubleIsInfinite:         return inline_fp_range_check(intrinsic_id());
 543 
 544   case vmIntrinsics::_numberOfLeadingZeros_i:
 545   case vmIntrinsics::_numberOfLeadingZeros_l:
 546   case vmIntrinsics::_numberOfTrailingZeros_i:
 547   case vmIntrinsics::_numberOfTrailingZeros_l:
 548   case vmIntrinsics::_bitCount_i:
 549   case vmIntrinsics::_bitCount_l:

2184     case vmIntrinsics::_remainderUnsigned_l: {
2185       zero_check_long(argument(2));
2186       // Compile-time detection of a zero divisor (ArithmeticException)
2187       if (stopped()) {
2188         return true; // keep the graph constructed so far
2189       }
2190       n = new UModLNode(control(), argument(0), argument(2));
2191       break;
2192     }
2193     default:  fatal_unexpected_iid(id);  break;
2194   }
2195   set_result(_gvn.transform(n));
2196   return true;
2197 }
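
For reference, the semantics being intrinsified here (Long.remainderUnsigned) are an unsigned 64-bit modulo behind a divisor check; a minimal C++ sketch, with std::domain_error standing in for Java's ArithmeticException:

    #include <cstdint>
    #include <stdexcept>

    // zero_check_long runs first; only then does the UModLNode do the modulo.
    uint64_t remainder_unsigned(uint64_t dividend, uint64_t divisor) {
      if (divisor == 0) throw std::domain_error("/ by zero");
      return dividend % divisor;
    }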
2198 
2199 //----------------------------inline_unsafe_access----------------------------
2200 
2201 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2202   // Attempt to infer a sharper value type from the offset and base type.
2203   ciKlass* sharpened_klass = nullptr;
2204 
2205   // See if it is an instance field, with an object type.
2206   if (alias_type->field() != nullptr) {
2207     if (alias_type->field()->type()->is_klass()) {
2208       sharpened_klass = alias_type->field()->type()->as_klass();
2209     }
2210   }
2211 
2212   const TypeOopPtr* result = nullptr;
2213   // See if it is a narrow oop array.
2214   if (adr_type->isa_aryptr()) {
2215     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2216       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2217       if (elem_type != nullptr && elem_type->is_loaded()) {
2218         // Sharpen the value type.
2219         result = elem_type;
2220       }
2221     }
2222   }
2223 
2224   // The sharpened class might be unloaded if there is no class loader
2225   // constraint in place.
2226   if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2227     // Sharpen the value type.
2228     result = TypeOopPtr::make_from_klass(sharpened_klass);
2229   }
2230   if (result != nullptr) {
2231 #ifndef PRODUCT
2232     if (C->print_intrinsics() || C->print_inlining()) {
2233       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2234       tty->print("  sharpened value: ");  result->dump(); tty->cr();
2235     }
2236 #endif
2237   }
2238   return result;
2239 }
2240 
2241 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2242   switch (kind) {
2243       case Relaxed:
2244         return MO_UNORDERED;
2245       case Opaque:
2246         return MO_RELAXED;
2247       case Acquire:
2248         return MO_ACQUIRE;
2249       case Release:
2250         return MO_RELEASE;
2251       case Volatile:
2252         return MO_SEQ_CST;
2253       default:
2254         ShouldNotReachHere();
2255         return 0;
2256   }
2257 }
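
For intuition, these access kinds line up roughly with the C++11 memory orders. A hedged sketch (the enum mirrors AccessKind above; note that a Java plain access is strictly weaker than any std::atomic order, so memory_order_relaxed only approximates it):

    #include <atomic>

    enum AccessKind { Relaxed, Opaque, Acquire, Release, Volatile };

    std::memory_order approx_cpp_order(AccessKind kind) {
      switch (kind) {
        case Opaque:   return std::memory_order_relaxed;  // MO_RELAXED
        case Acquire:  return std::memory_order_acquire;  // MO_ACQUIRE (loads only)
        case Release:  return std::memory_order_release;  // MO_RELEASE (stores only)
        case Volatile: return std::memory_order_seq_cst;  // MO_SEQ_CST
        case Relaxed:  return std::memory_order_relaxed;  // MO_UNORDERED, approximated
      }
      return std::memory_order_seq_cst;  // unreachable
    }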
2258 
2259 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2260   if (callee()->is_static())  return false;  // caller must have the capability!
2261   DecoratorSet decorators = C2_UNSAFE_ACCESS;
2262   guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2263   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2264   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2265 
2266   if (is_reference_type(type)) {
2267     decorators |= ON_UNKNOWN_OOP_REF;
2268   }
2269 
2270   if (unaligned) {
2271     decorators |= C2_UNALIGNED;
2272   }
2273 
2274 #ifndef PRODUCT
2275   {
2276     ResourceMark rm;
2277     // Check the signatures.
2278     ciSignature* sig = callee()->signature();
2279 #ifdef ASSERT
2280     if (!is_store) {
2281       // Object getReference(Object base, int/long offset), etc.
2282       BasicType rtype = sig->return_type()->basic_type();
2283       assert(rtype == type, "getter must return the expected value");
2284       assert(sig->count() == 2, "oop getter has 2 arguments");
2285       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2286       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2287     } else {
2288       // void putReference(Object base, int/long offset, Object x), etc.
2289       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2290       assert(sig->count() == 3, "oop putter has 3 arguments");
2291       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2292       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2293       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2294       assert(vtype == type, "putter must accept the expected value");
2295     }
2296 #endif // ASSERT
2297   }
2298 #endif //PRODUCT
2299 
2300   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2301 
2302   Node* receiver = argument(0);  // type: oop
2303 
2304   // Build address expression.
2305   Node* heap_base_oop = top();
2306 
2307   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2308   Node* base = argument(1);  // type: oop
2309   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2310   Node* offset = argument(2);  // type: long
2311   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2312   // to be plain byte offsets, which are also the same as those accepted
2313   // by oopDesc::field_addr.
2314   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2315          "fieldOffset must be byte-scaled");
2316   // 32-bit machines ignore the high half!
2317   offset = ConvL2X(offset);
2318 
2319   // Save state and restore on bailout
2320   uint old_sp = sp();
2321   SafePointNode* old_map = clone_map();
2322 
2323   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2324 
2325   if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2326     if (type != T_OBJECT) {
2327       decorators |= IN_NATIVE; // off-heap primitive access
2328     } else {
2329       set_map(old_map);
2330       set_sp(old_sp);
2331       return false; // off-heap oop accesses are not supported
2332     }
2333   } else {
2334     heap_base_oop = base; // on-heap or mixed access
2335   }
2336 
2337   // Can base be null? Otherwise, always on-heap access.
2338   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2339 
2340   if (!can_access_non_heap) {
2341     decorators |= IN_HEAP;
2342   }
2343 
2344   Node* val = is_store ? argument(4) : nullptr;
2345 
2346   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2347   if (adr_type == TypePtr::NULL_PTR) {
2348     set_map(old_map);
2349     set_sp(old_sp);
2350     return false; // off-heap access with zero address
2351   }
2352 
2353   // Try to categorize the address.
2354   Compile::AliasType* alias_type = C->alias_type(adr_type);
2355   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2356 
2357   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2358       alias_type->adr_type() == TypeAryPtr::RANGE) {
2359     set_map(old_map);
2360     set_sp(old_sp);
2361     return false; // not supported
2362   }
2363 
2364   bool mismatched = false;
2365   BasicType bt = alias_type->basic_type();
2366   if (bt != T_ILLEGAL) {
2367     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2368     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2369       // Alias type doesn't differentiate between byte[] and boolean[].
2370       // Use address type to get the element type.
2371       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2372     }
2373     if (is_reference_type(bt, true)) {
2374       // accessing an array field with getReference is not a mismatch
2375       bt = T_OBJECT;
2376     }
2377     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2378       // Don't intrinsify mismatched object accesses
2379       set_map(old_map);
2380       set_sp(old_sp);
2381       return false;
2382     }
2383     mismatched = (bt != type);
2384   } else if (alias_type->adr_type()->isa_oopptr()) {
2385     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2386   }
2387 
2388   destruct_map_clone(old_map);
2389   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2390 
2391   if (mismatched) {
2392     decorators |= C2_MISMATCHED;
2393   }
2394 
2395   // First guess at the value type.
2396   const Type *value_type = Type::get_const_basic_type(type);
2397 
2398   // Figure out the memory ordering.
2399   decorators |= mo_decorator_for_access_kind(kind);
2400 
2401   if (!is_store && type == T_OBJECT) {
2402     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2403     if (tjp != nullptr) {
2404       value_type = tjp;
2405     }
2406   }
2407 
2408   receiver = null_check(receiver);
2409   if (stopped()) {
2410     return true;
2411   }
2412   // Heap pointers get a null-check from the interpreter,
2413   // as a courtesy.  However, this is not guaranteed by Unsafe,
2414   // and it is not possible to fully distinguish unintended nulls
2415   // from intended ones in this API.
2416 
2417   if (!is_store) {
2418     Node* p = nullptr;
2419     // Try to constant fold a load from a constant field
2420     ciField* field = alias_type->field();
2421     if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) {
2422       // final or stable field
2423       p = make_constant_from_field(field, heap_base_oop);
2424     }
2425 
2426     if (p == nullptr) { // Could not constant fold the load
2427       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2428       // Normalize the value returned by getBoolean in the following cases
2429       if (type == T_BOOLEAN &&
2430           (mismatched ||
2431            heap_base_oop == top() ||                  // - heap_base_oop is null or
2432            (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2433                                                       //   and the unsafe access is made at a large offset
2434                                                       //   (i.e., larger than the maximum offset necessary for any
2435                                                       //   field access)
2436             ) {
2437           IdealKit ideal = IdealKit(this);
2438 #define __ ideal.
2439           IdealVariable normalized_result(ideal);
2440           __ declarations_done();
2441           __ set(normalized_result, p);
2442           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2443           __ set(normalized_result, ideal.ConI(1));
2444           ideal.end_if();
2445           final_sync(ideal);
2446           p = __ value(normalized_result);
2447 #undef __
2448       }
2449     }
2450     if (type == T_ADDRESS) {
2451       p = gvn().transform(new CastP2XNode(nullptr, p));
2452       p = ConvX2UL(p);
2453     }
2454     // The load node has the control of the preceding MemBarCPUOrder.  All
2455     // following nodes will have the control of the MemBarCPUOrder inserted at
2456     // the end of this method.  So, pushing the load onto the stack at a later
2457     // point is fine.
2458     set_result(p);
2459   } else {
2460     if (bt == T_ADDRESS) {
2461       // Repackage the long as a pointer.
2462       val = ConvL2X(val);
2463       val = gvn().transform(new CastX2PNode(val));
2464     }
2465     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2466   }
2467 
2468   return true;
2469 }
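
The addressing rule that make_unsafe_address implements reduces to a two-way choice; a minimal sketch, assuming the usual Unsafe contract (non-null base: 'offset' is a byte offset into the object; null base: 'offset' is itself the absolute address):

    #include <cstdint>

    inline void* unsafe_address(void* base, int64_t offset) {
      return (base != nullptr)
          ? static_cast<void*>(static_cast<char*>(base) + offset)   // on-heap or mixed
          : reinterpret_cast<void*>(static_cast<intptr_t>(offset)); // off-heap, raw address
    }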
2470 
2471 //----------------------------inline_unsafe_load_store----------------------------
2472 // This method serves a couple of different customers (depending on LoadStoreKind):
2473 //
2474 // LS_cmp_swap:
2475 //
2476 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2477 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2478 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2479 //
2480 // LS_cmp_swap_weak:
2481 //
2482 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2483 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2484 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2485 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2486 //
2487 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);

2656     }
2657     case LS_cmp_swap:
2658     case LS_cmp_swap_weak:
2659     case LS_get_add:
2660       break;
2661     default:
2662       ShouldNotReachHere();
2663   }
2664 
2665   // Null check receiver.
2666   receiver = null_check(receiver);
2667   if (stopped()) {
2668     return true;
2669   }
2670 
2671   int alias_idx = C->get_alias_index(adr_type);
2672 
2673   if (is_reference_type(type)) {
2674     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2675 
2676     // Transformation of a value which could be a null pointer (CastPP #null)
2677     // could be delayed during Parse (for example, in adjust_map_after_if()).
2678     // Execute transformation here to avoid barrier generation in such case.
2679     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2680       newval = _gvn.makecon(TypePtr::NULL_PTR);
2681 
2682     if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2683       // Refine the value to a null constant, when it is known to be null
2684       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2685     }
2686   }
2687 
2688   Node* result = nullptr;
2689   switch (kind) {
2690     case LS_cmp_exchange: {
2691       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2692                                             oldval, newval, value_type, type, decorators);
2693       break;
2694     }
2695     case LS_cmp_swap_weak:

2842                     Deoptimization::Action_make_not_entrant);
2843     }
2844     if (stopped()) {
2845       return true;
2846     }
2847 #endif //INCLUDE_JVMTI
2848 
2849   Node* test = nullptr;
2850   if (LibraryCallKit::klass_needs_init_guard(kls)) {
2851     // Note:  The argument might still be an illegal value like
2852     // Serializable.class or Object[].class.   The runtime will handle it.
2853     // But we must make an explicit check for initialization.
2854     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2855     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2856     // can generate code to load it as unsigned byte.
2857     Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
2858     Node* bits = intcon(InstanceKlass::fully_initialized);
2859     test = _gvn.transform(new SubINode(inst, bits));
2860     // The 'test' is non-zero if we need to take a slow path.
2861   }
2862 
2863   Node* obj = new_instance(kls, test);
2864   set_result(obj);
2865   return true;
2866 }
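
The initialization guard built above boils down to an unsigned byte load and a subtraction; a sketch, assuming a byte-wide _init_state field with fully_initialized as its terminal value (names borrowed from InstanceKlass):

    #include <cstdint>

    // A non-zero result means the klass may not be initialized yet, so the
    // allocation takes the slow path (this mirrors the SubINode 'test').
    inline bool needs_init_slow_path(const uint8_t* init_state_addr,
                                     uint8_t fully_initialized) {
      uint8_t state = *init_state_addr;  // loaded as an unsigned byte
      return (state - fully_initialized) != 0;
    }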
2867 
2868 //------------------------inline_native_time_funcs--------------
2869 // inline code for System.currentTimeMillis() and System.nanoTime()
2870 // these have the same type and signature
2871 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2872   const TypeFunc* tf = OptoRuntime::void_long_Type();
2873   const TypePtr* no_memory_effects = nullptr;
2874   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2875   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2876 #ifdef ASSERT
2877   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2878   assert(value_top == top(), "second value must be top");
2879 #endif
2880   set_result(value);
2881   return true;
2882 }
2883 

3598 
3599 //------------------------inline_native_setCurrentThread------------------
3600 bool LibraryCallKit::inline_native_setCurrentThread() {
3601   assert(C->method()->changes_current_thread(),
3602          "method changes current Thread but is not annotated ChangesCurrentThread");
3603   Node* arr = argument(1);
3604   Node* thread = _gvn.transform(new ThreadLocalNode());
3605   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3606   Node* thread_obj_handle
3607     = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3608   thread_obj_handle = _gvn.transform(thread_obj_handle);
3609   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3610   access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3611   JFR_ONLY(extend_setCurrentThread(thread, arr);)
3612   return true;
3613 }
3614 
3615 const Type* LibraryCallKit::scopedValueCache_type() {
3616   ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3617   const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3618   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3619 
3620   // Because we create the scopedValue cache lazily we have to make the
3621   // type of the result BotPTR.
3622   bool xk = etype->klass_is_exact();
3623   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3624   return objects_type;
3625 }
3626 
3627 Node* LibraryCallKit::scopedValueCache_helper() {
3628   Node* thread = _gvn.transform(new ThreadLocalNode());
3629   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3630   // We cannot use immutable_memory() because we might flip onto a
3631   // different carrier thread, at which point we'll need to use that
3632   // carrier thread's cache.
3633   // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3634   //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3635   return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3636 }
3637 
3638 //------------------------inline_native_scopedValueCache------------------
3639 bool LibraryCallKit::inline_native_scopedValueCache() {
3640   Node* cache_obj_handle = scopedValueCache_helper();
3641   const Type* objects_type = scopedValueCache_type();
3642   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3643 
3644   return true;
3645 }
3646 
3647 //------------------------inline_native_setScopedValueCache------------------
3648 bool LibraryCallKit::inline_native_setScopedValueCache() {
3649   Node* arr = argument(0);
3650   Node* cache_obj_handle = scopedValueCache_helper();
3651   const Type* objects_type = scopedValueCache_type();
3652 
3653   const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3654   access_store_at(nullptr, cache_obj_handle, adr_type, arr, objects_type, T_OBJECT, IN_NATIVE | MO_UNORDERED);
3655 
3656   return true;
3657 }
3658 
3659 //---------------------------load_mirror_from_klass----------------------------
3660 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3661 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3662   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3663   Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3664   // mirror = ((OopHandle)mirror)->resolve();
3665   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3666 }
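
The two dependent loads above are OopHandle::resolve() spelled out: first load the handle stored in the Klass, then load the oop through it with an IN_NATIVE access. A minimal model, assuming an oop is just a pointer:

    typedef void* oop;

    // Simplified OopHandle: a pointer to a root slot holding the oop.
    struct OopHandle {
      oop* _obj;                             // e.g. the slot behind Klass::_java_mirror
      oop resolve() const { return *_obj; }  // the IN_NATIVE access_load above
    };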
3667 
3668 //-----------------------load_klass_from_mirror_common-------------------------
3669 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3670 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3671 // and branch to the given path on the region.
3672 // If never_see_null, take an uncommon trap on null, so we can optimistically
3673 // compile for the non-null case.
3674 // If the region is null, force never_see_null = true.
3675 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3676                                                     bool never_see_null,
3677                                                     RegionNode* region,
3678                                                     int null_path,
3679                                                     int offset) {
3680   if (region == nullptr)  never_see_null = true;
3681   Node* p = basic_plus_adr(mirror, offset);
3682   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3683   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3684   Node* null_ctl = top();
3685   kls = null_check_oop(kls, &null_ctl, never_see_null);
3686   if (region != nullptr) {
3687     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

3690     assert(null_ctl == top(), "no loose ends");
3691   }
3692   return kls;
3693 }
3694 
3695 //--------------------(inline_native_Class_query helpers)---------------------
3696 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3697 // Fall through if (mods & mask) == bits, take the guard otherwise.
3698 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3699   // Branch around if the given klass has the given modifier bit set.
3700   // Like generate_guard, adds a new path onto the region.
3701   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3702   Node* mods = make_load(nullptr, modp, TypeInt::INT, T_INT, MemNode::unordered);
3703   Node* mask = intcon(modifier_mask);
3704   Node* bits = intcon(modifier_bits);
3705   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3706   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3707   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3708   return generate_fair_guard(bol, region);
3709 }
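
In other words, the guard is taken exactly when the masked flags differ from the expected bits; a one-function sketch (so generate_interface_guard below, which passes bits == 0, branches away precisely when JVM_ACC_INTERFACE is set):

    // True means fall through; false means the guard path is taken.
    inline bool access_flags_fall_through(int mods, int mask, int bits) {
      return (mods & mask) == bits;
    }
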
3710 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3711   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3712 }
3713 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3714   return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3715 }
3716 
3717 //-------------------------inline_native_Class_query-------------------
3718 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3719   const Type* return_type = TypeInt::BOOL;
3720   Node* prim_return_value = top();  // what happens if it's a primitive class?
3721   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3722   bool expect_prim = false;     // most of these guys expect to work on refs
3723 
3724   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3725 
3726   Node* mirror = argument(0);
3727   Node* obj    = top();
3728 
3729   switch (id) {

3883 
3884   case vmIntrinsics::_getClassAccessFlags:
3885     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3886     query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
3887     break;
3888 
3889   default:
3890     fatal_unexpected_iid(id);
3891     break;
3892   }
3893 
3894   // Fall-through is the normal case of a query to a real class.
3895   phi->init_req(1, query_value);
3896   region->init_req(1, control());
3897 
3898   C->set_has_split_ifs(true); // Has chance for split-if optimization
3899   set_result(region, phi);
3900   return true;
3901 }
3902 
3903 //-------------------------inline_Class_cast-------------------
3904 bool LibraryCallKit::inline_Class_cast() {
3905   Node* mirror = argument(0); // Class
3906   Node* obj    = argument(1);
3907   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3908   if (mirror_con == nullptr) {
3909     return false;  // dead path (mirror->is_top()).
3910   }
3911   if (obj == nullptr || obj->is_top()) {
3912     return false;  // dead path
3913   }
3914   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
3915 
3916   // First, see if Class.cast() can be folded statically.
3917   // java_mirror_type() returns non-null for compile-time Class constants.
3918   ciType* tm = mirror_con->java_mirror_type();
3919   if (tm != nullptr && tm->is_klass() &&
3920       tp != nullptr) {
3921     if (!tp->is_loaded()) {
3922       // Don't use intrinsic when class is not loaded.
3923       return false;
3924     } else {
3925       int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());
3926       if (static_res == Compile::SSC_always_true) {
3927         // isInstance() is true - fold the code.
3928         set_result(obj);
3929         return true;
3930       } else if (static_res == Compile::SSC_always_false) {
3931         // Don't use intrinsic, have to throw ClassCastException.
3932         // If the reference is null, the non-intrinsic bytecode will
3933         // be optimized appropriately.
3934         return false;
3935       }
3936     }
3937   }
3938 
3939   // Bailout intrinsic and do normal inlining if exception path is frequent.
3940   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3941     return false;
3942   }
3943 
3944   // Generate dynamic checks.
3945   // Class.cast() is java implementation of _checkcast bytecode.
3946   // Do checkcast (Parse::do_checkcast()) optimizations here.
3947 
3948   mirror = null_check(mirror);
3949   // If mirror is dead, only null-path is taken.
3950   if (stopped()) {
3951     return true;
3952   }
3953 
3954   // Not-subtype or the mirror's klass ptr is null (in case it is a primitive).
3955   enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
3956   RegionNode* region = new RegionNode(PATH_LIMIT);
3957   record_for_igvn(region);
3958 
3959   // Now load the mirror's klass metaobject, and null-check it.
3960   // If kls is null, we have a primitive mirror and
3961   // nothing is an instance of a primitive type.
3962   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
3963 
3964   Node* res = top();
3965   if (!stopped()) {
3966     Node* bad_type_ctrl = top();
3967     // Do checkcast optimizations.
3968     res = gen_checkcast(obj, kls, &bad_type_ctrl);
3969     region->init_req(_bad_type_path, bad_type_ctrl);
3970   }
3971   if (region->in(_prim_path) != top() ||
3972       region->in(_bad_type_path) != top()) {
3973     // Let Interpreter throw ClassCastException.
3974     PreserveJVMState pjvms(this);
3975     set_control(_gvn.transform(region));
3976     uncommon_trap(Deoptimization::Reason_intrinsic,
3977                   Deoptimization::Action_maybe_recompile);
3978   }
3979   if (!stopped()) {
3980     set_result(res);
3981   }
3982   return true;
3983 }
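
For reference, the contract the intrinsic must preserve is that of Class.cast itself; a hedged C++ sketch (ClassMirror and is_instance are stand-ins, and the throw corresponds to the _bad_type_path uncommon trap above):

    #include <stdexcept>

    struct ClassMirror {
      bool is_instance(const void* obj) const;  // assumed helper
    };

    // Class.cast: null passes through unchanged; a non-null non-instance throws.
    inline const void* class_cast(const ClassMirror& c, const void* obj) {
      if (obj != nullptr && !c.is_instance(obj)) {
        throw std::runtime_error("ClassCastException");
      }
      return obj;
    }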
3984 
3985 
3986 //--------------------------inline_native_subtype_check------------------------
3987 // This intrinsic takes the JNI calls out of the heart of
3988 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3989 bool LibraryCallKit::inline_native_subtype_check() {
3990   // Pull both arguments off the stack.
3991   Node* args[2];                // two java.lang.Class mirrors: superc, subc
3992   args[0] = argument(0);
3993   args[1] = argument(1);
3994   Node* klasses[2];             // corresponding Klasses: superk, subk
3995   klasses[0] = klasses[1] = top();
3996 
3997   enum {
3998     // A full decision tree on {superc is prim, subc is prim}:
3999     _prim_0_path = 1,           // {P,N} => false
4000                                 // {P,P} & superc!=subc => false
4001     _prim_same_path,            // {P,P} & superc==subc => true
4002     _prim_1_path,               // {N,P} => false
4003     _ref_subtype_path,          // {N,N} & subtype check wins => true
4004     _both_ref_path,             // {N,N} & subtype check loses => false
4005     PATH_LIMIT
4006   };
4007 
4008   RegionNode* region = new RegionNode(PATH_LIMIT);
4009   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
4010   record_for_igvn(region);
4011 
4012   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
4013   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4014   int class_klass_offset = java_lang_Class::klass_offset();
4015 
4016   // First null-check both mirrors and load each mirror's klass metaobject.
4017   int which_arg;
4018   for (which_arg = 0; which_arg <= 1; which_arg++) {
4019     Node* arg = args[which_arg];
4020     arg = null_check(arg);
4021     if (stopped())  break;
4022     args[which_arg] = arg;
4023 
4024     Node* p = basic_plus_adr(arg, class_klass_offset);
4025     Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
4026     klasses[which_arg] = _gvn.transform(kls);
4027   }
4028 
4029   // Having loaded both klasses, test each for null.
4030   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4031   for (which_arg = 0; which_arg <= 1; which_arg++) {
4032     Node* kls = klasses[which_arg];
4033     Node* null_ctl = top();
4034     kls = null_check_oop(kls, &null_ctl, never_see_null);
4035     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
4036     region->init_req(prim_path, null_ctl);
4037     if (stopped())  break;
4038     klasses[which_arg] = kls;
4039   }
4040 
4041   if (!stopped()) {
4042     // now we have two reference types, in klasses[0..1]
4043     Node* subk   = klasses[1];  // the argument to isAssignableFrom
4044     Node* superk = klasses[0];  // the receiver
4045     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4046     // now we have a successful reference subtype check
4047     region->set_req(_ref_subtype_path, control());
4048   }
4049 
4050   // If both operands are primitive (both klasses null), then
4051   // we must return true when they are identical primitives.
4052   // It is convenient to test this after the first null klass check.
4053   set_control(region->in(_prim_0_path)); // go back to first null check
4054   if (!stopped()) {
4055     // Since superc is primitive, make a guard for the superc==subc case.
4056     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4057     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4058     generate_guard(bol_eq, region, PROB_FAIR);
4059     if (region->req() == PATH_LIMIT+1) {
4060       // A guard was added.  If the added guard is taken, superc==subc.
4061       region->swap_edges(PATH_LIMIT, _prim_same_path);
4062       region->del_req(PATH_LIMIT);
4063     }
4064     region->set_req(_prim_0_path, control()); // Not equal after all.
4065   }
4066 
4067   // these are the only paths that produce 'true':
4068   phi->set_req(_prim_same_path,   intcon(1));
4069   phi->set_req(_ref_subtype_path, intcon(1));
4070 
4071   // pull together the cases:
4072   assert(region->req() == PATH_LIMIT, "sane region");
4073   for (uint i = 1; i < region->req(); i++) {
4074     Node* ctl = region->in(i);
4075     if (ctl == nullptr || ctl == top()) {
4076       region->set_req(i, top());
4077       phi   ->set_req(i, top());
4078     } else if (phi->in(i) == nullptr) {
4079       phi->set_req(i, intcon(0)); // all other paths produce 'false'
4080     }
4081   }
4082 
4083   set_control(_gvn.transform(region));
4084   set_result(_gvn.transform(phi));
4085   return true;
4086 }
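
The five-path decision tree above collapses to a few lines of straight-line code; a sketch that models a mirror as a pointer plus a Klass* which is null for primitive classes (Klass and is_subtype_of are assumed stand-ins):

    struct Klass;                                              // opaque stand-in
    bool is_subtype_of(const Klass* sub, const Klass* super);  // assumed helper

    bool mirror_is_assignable_from(const void* superc, const Klass* superk,
                                   const void* subc,   const Klass* subk) {
      if (superk == nullptr || subk == nullptr) {
        // At least one side is primitive: assignable only when both mirrors
        // are the same primitive class ({P,P} & superc==subc).
        return superc == subc;
      }
      return is_subtype_of(subk, superk);  // {N,N}: gen_subtype_check's job
    }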
4087 
4088 //---------------------generate_array_guard_common------------------------
4089 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
4090                                                   bool obj_array, bool not_array) {
4091 
4092   if (stopped()) {
4093     return nullptr;
4094   }
4095 
4096   // If obj_array/not_array==false/false:
4097   // Branch around if the given klass is in fact an array (either obj or prim).
4098   // If obj_array/not_array==false/true:
4099   // Branch around if the given klass is not an array klass of any kind.
4100   // If obj_array/not_array==true/true:
4101   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
4102   // If obj_array/not_array==true/false:
4103   // Branch around if the kls is an oop array (Object[] or subtype)
4104   //
4105   // Like generate_guard, adds a new path onto the region.
4106   jint  layout_con = 0;
4107   Node* layout_val = get_layout_helper(kls, layout_con);
4108   if (layout_val == nullptr) {
4109     bool query = (obj_array
4110                   ? Klass::layout_helper_is_objArray(layout_con)
4111                   : Klass::layout_helper_is_array(layout_con));
4112     if (query == not_array) {
4113       return nullptr;                       // never a branch
4114     } else {                             // always a branch
4115       Node* always_branch = control();
4116       if (region != nullptr)
4117         region->add_req(always_branch);
4118       set_control(top());
4119       return always_branch;
4120     }
4121   }
4122   // Now test the correct condition.
4123   jint  nval = (obj_array
4124                 ? (jint)(Klass::_lh_array_tag_type_value
4125                    <<    Klass::_lh_array_tag_shift)
4126                 : Klass::_lh_neutral_value);
4127   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4128   BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
4129   // invert the test if we are looking for a non-array
4130   if (not_array)  btest = BoolTest(btest).negate();
4131   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4132   return generate_fair_guard(bol, region);
4133 }
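
The signed comparison works because of how the layout helper is encoded; a sketch assuming HotSpot's scheme (instances are non-negative, arrays carry a 2-bit tag in the sign bits, and object arrays, tag 0b10, sort below type arrays, tag 0b11, as signed 32-bit values):

    #include <cstdint>

    // _lh_array_tag_type_value << _lh_array_tag_shift: 0b11 in the top two
    // bits, i.e. the signed threshold separating obj arrays from type arrays.
    const int32_t kTypeArrayTagThreshold = (int32_t)0xC0000000;

    inline bool lh_is_array(int32_t lh)     { return lh < 0; }  // _lh_neutral_value
    inline bool lh_is_obj_array(int32_t lh) { return lh < kTypeArrayTagThreshold; }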
4134 
4135 
4136 //-----------------------inline_native_newArray--------------------------
4137 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4138 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4139 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4140   Node* mirror;
4141   Node* count_val;
4142   if (uninitialized) {
4143     null_check_receiver();
4144     mirror    = argument(1);
4145     count_val = argument(2);
4146   } else {
4147     mirror    = argument(0);
4148     count_val = argument(1);
4149   }
4150 
4151   mirror = null_check(mirror);
4152   // If mirror or obj is dead, only null-path is taken.
4153   if (stopped())  return true;
4154 
4155   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4156   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4157   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);

4263   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4264   { PreserveReexecuteState preexecs(this);
4265     jvms()->set_should_reexecute(true);
4266 
4267     array_type_mirror = null_check(array_type_mirror);
4268     original          = null_check(original);
4269 
4270     // Check if a null path was taken unconditionally.
4271     if (stopped())  return true;
4272 
4273     Node* orig_length = load_array_length(original);
4274 
4275     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4276     klass_node = null_check(klass_node);
4277 
4278     RegionNode* bailout = new RegionNode(1);
4279     record_for_igvn(bailout);
4280 
4281     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4282     // Bail out if that is so.
4283     Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
4284     if (not_objArray != nullptr) {
4285       // Improve the klass node's type from the new optimistic assumption:
4286       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4287       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
4288       Node* cast = new CastPPNode(klass_node, akls);
4289       cast->init_req(0, control());
4290       klass_node = _gvn.transform(cast);
4291     }
4292 
4293     // Bail out if either start or end is negative.
4294     generate_negative_guard(start, bailout, &start);
4295     generate_negative_guard(end,   bailout, &end);
4296 
4297     Node* length = end;
4298     if (_gvn.type(start) != TypeInt::ZERO) {
4299       length = _gvn.transform(new SubINode(end, start));
4300     }
4301 
4302     // Bail out if length is negative.
4303     // Without this check, new_array would throw
4304     // NegativeArraySizeException, but IllegalArgumentException is what
4305     // should be thrown.
4306     generate_negative_guard(length, bailout, &length);
4307 
4308     if (bailout->req() > 1) {
4309       PreserveJVMState pjvms(this);
4310       set_control(_gvn.transform(bailout));
4311       uncommon_trap(Deoptimization::Reason_intrinsic,
4312                     Deoptimization::Action_maybe_recompile);
4313     }
4314 
4315     if (!stopped()) {
4316       // How many elements will we copy from the original?
4317       // The answer is MinI(orig_length - start, length).
4318       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4319       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4320 
4321       // Generate a direct call to the right arraycopy function(s).
4322       // We know the copy is disjoint but we might not know if the
4323       // oop stores need checking.
4324       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
4325       // This will fail a store-check if x contains any non-nulls.
4326 
4327       // ArrayCopyNode::Ideal may transform the ArrayCopyNode to

4330       // to the copyOf to be validated, including that the copy to the
4331       // new array won't trigger an ArrayStoreException. That subtype
4332       // check can be optimized if we know something on the type of
4333       // the input array from type speculation.
4334       if (_gvn.type(klass_node)->singleton()) {
4335         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4336         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4337 
4338         int test = C->static_subtype_check(superk, subk);
4339         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4340           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4341           if (t_original->speculative_type() != nullptr) {
4342             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4343           }
4344         }
4345       }
4346 
4347       bool validated = false;
4348       // Reason_class_check rather than Reason_intrinsic because we
4349       // want to intrinsify even if this traps.
4350       if (!too_many_traps(Deoptimization::Reason_class_check)) {
4351         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4352 
4353         if (not_subtype_ctrl != top()) {
4354           PreserveJVMState pjvms(this);
4355           set_control(not_subtype_ctrl);
4356           uncommon_trap(Deoptimization::Reason_class_check,
4357                         Deoptimization::Action_make_not_entrant);
4358           assert(stopped(), "Should be stopped");
4359         }
4360         validated = true;
4361       }
4362 
4363       if (!stopped()) {
4364         newcopy = new_array(klass_node, length, 0);  // no arguments to push
4365 
4366         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
4367                                                 load_object_klass(original), klass_node);
4368         if (!is_copyOfRange) {
4369           ac->set_copyof(validated);
4370         } else {

4416 
4417 //-----------------------generate_method_call----------------------------
4418 // Use generate_method_call to make a slow-call to the real
4419 // method if the fast path fails.  An alternative would be to
4420 // use a stub like OptoRuntime::slow_arraycopy_Java.
4421 // This only works for expanding the current library call,
4422 // not another intrinsic.  (E.g., don't use this for making an
4423 // arraycopy call inside of the copyOf intrinsic.)
4424 CallJavaNode*
4425 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4426   // When compiling the intrinsic method itself, do not use this technique.
4427   guarantee(callee() != C->method(), "cannot make slow-call to self");
4428 
4429   ciMethod* method = callee();
4430   // ensure the JVMS we have will be correct for this call
4431   guarantee(method_id == method->intrinsic_id(), "must match");
4432 
4433   const TypeFunc* tf = TypeFunc::make(method);
4434   if (res_not_null) {
4435     assert(tf->return_type() == T_OBJECT, "");
4436     const TypeTuple* range = tf->range();
4437     const Type** fields = TypeTuple::fields(range->cnt());
4438     fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4439     const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4440     tf = TypeFunc::make(tf->domain(), new_range);
4441   }
4442   CallJavaNode* slow_call;
4443   if (is_static) {
4444     assert(!is_virtual, "");
4445     slow_call = new CallStaticJavaNode(C, tf,
4446                            SharedRuntime::get_resolve_static_call_stub(), method);
4447   } else if (is_virtual) {
4448     assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4449     int vtable_index = Method::invalid_vtable_index;
4450     if (UseInlineCaches) {
4451       // Suppress the vtable call
4452     } else {
4453       // hashCode and clone are not miranda methods,
4454       // so the vtable index is fixed.
4455       // No need to use the linkResolver to get it.
4456       vtable_index = method->vtable_index();
4457       assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4458              "bad index %d", vtable_index);
4459     }
4460     slow_call = new CallDynamicJavaNode(tf,

4477   set_edges_for_java_call(slow_call);
4478   return slow_call;
4479 }
4480 
4481 
4482 /**
4483  * Build special case code for calls to hashCode on an object. This call may
4484  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4485  * slightly different code.
4486  */
4487 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4488   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4489   assert(!(is_virtual && is_static), "either virtual, special, or static");
4490 
4491   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4492 
4493   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4494   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4495   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4496   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4497   Node* obj = nullptr;
4498   if (!is_static) {
4499     // Check for hashing null object
4500     obj = null_check_receiver();
4501     if (stopped())  return true;        // unconditionally null
4502     result_reg->init_req(_null_path, top());
4503     result_val->init_req(_null_path, top());
4504   } else {
4505     // Do a null check, and return zero if null.
4506     // System.identityHashCode(null) == 0
4507     obj = argument(0);
4508     Node* null_ctl = top();
4509     obj = null_check_oop(obj, &null_ctl);
4510     result_reg->init_req(_null_path, null_ctl);
4511     result_val->init_req(_null_path, _gvn.intcon(0));
4512   }
4513 
4514   // Unconditionally null?  Then return right away.
4515   if (stopped()) {
4516     set_control( result_reg->in(_null_path));
4517     if (!stopped())
4518       set_result(result_val->in(_null_path));
4519     return true;
4520   }
4521 
4522   // We only go to the fast case code if we pass a number of guards.  The
4523   // paths which do not pass are accumulated in the slow_region.
4524   RegionNode* slow_region = new RegionNode(1);
4525   record_for_igvn(slow_region);
4526 
4527   // If this is a virtual call, we generate a funny guard.  We pull out
4528   // the vtable entry corresponding to hashCode() from the target object.
4529   // If the target method which we are calling happens to be the native
4530   // Object hashCode() method, we pass the guard.  We do not need this
4531   // guard for non-virtual calls -- the caller is known to be the native
4532   // Object hashCode().
4533   if (is_virtual) {
4534     // After null check, get the object's klass.
4535     Node* obj_klass = load_object_klass(obj);
4536     generate_virtual_guard(obj_klass, slow_region);
4537   }
4538 
4539   // Get the header out of the object, use LoadMarkNode when available
4540   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4541   // The control of the load must be null. Otherwise, the load can move before
4542   // the null check after castPP removal.
4543   Node* no_ctrl = nullptr;
4544   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4545 
4546   // Test the header to see if it is unlocked.
4547   Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);

4548   Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4549   Node *unlocked_val   = _gvn.MakeConX(markWord::unlocked_value);
4550   Node *chk_unlocked   = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
4551   Node *test_unlocked  = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
4552 
4553   generate_slow_guard(test_unlocked, slow_region);
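       // In other words: take the slow path whenever
       // (mark & lock_mask_in_place) != unlocked_value, i.e. when the object
       // is locked or its header is displaced, since the hash bits cannot be
       // read directly out of the header in that case.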
4554 
4555   // Get the hash value and check to see that it has been properly assigned.
4556   // We depend on hash_mask being at most 32 bits and avoid the use of
4557   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4558   // vm: see markWord.hpp.
4559   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
4560   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
4561   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4562   // This hack lets the hash bits live anywhere in the mark object now, as long
4563   // as the shift drops the relevant bits into the low 32 bits.  Note that
4564   // Java spec says that HashCode is an int so there's no point in capturing
4565   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4566   hshifted_header      = ConvX2I(hshifted_header);
4567   Node *hash_val       = _gvn.transform(new AndINode(hshifted_header, hash_mask));
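       // Conceptually, the fast path computed so far is
       //   int hash = (int)(mark >> hash_shift) & hash_mask;
       // what remains is to check that this value is an actually assigned
       // hash before returning it.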

4593     // this->control() comes from set_results_for_java_call
4594     result_reg->init_req(_slow_path, control());
4595     result_val->init_req(_slow_path, slow_result);
4596     result_io  ->set_req(_slow_path, i_o());
4597     result_mem ->set_req(_slow_path, reset_memory());
4598   }
4599 
4600   // Return the combined state.
4601   set_i_o(        _gvn.transform(result_io)  );
4602   set_all_memory( _gvn.transform(result_mem));
4603 
4604   set_result(result_reg, result_val);
4605   return true;
4606 }
4607 
4608 //---------------------------inline_native_getClass----------------------------
4609 // public final native Class<?> java.lang.Object.getClass();
4610 //
4611 // Build special case code for calls to getClass on an object.
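// In effect, obj.getClass() compiles down to two dependent loads: the klass
// pointer out of the object header, then the Java mirror out of the Klass.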
4612 bool LibraryCallKit::inline_native_getClass() {
4613   Node* obj = null_check_receiver();









4614   if (stopped())  return true;
4615   set_result(load_mirror_from_klass(load_object_klass(obj)));
4616   return true;
4617 }
4618 
4619 //-----------------inline_native_Reflection_getCallerClass---------------------
4620 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4621 //
4622 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4623 //
4624 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4625 // in that it must skip particular security frames and checks for
4626 // caller sensitive methods.
4627 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4628 #ifndef PRODUCT
4629   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4630     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4631   }
4632 #endif
4633 

4894     if (C->get_alias_index(src_type) == C->get_alias_index(dst_type)) {
4895       flags |= RC_NARROW_MEM; // narrow in memory
4896     }
4897   }
4898 
4899   // Call it.  Note that the length argument is not scaled.
4900   make_runtime_call(flags,
4901                     OptoRuntime::fast_arraycopy_Type(),
4902                     StubRoutines::unsafe_arraycopy(),
4903                     "unsafe_arraycopy",
4904                     dst_type,
4905                     src_addr, dst_addr, size XTOP);
4906 
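       // The copy is done: clear the thread-local doing-unsafe-access flag
       // that brackets the runtime call above.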
4907   store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
4908 
4909   return true;
4910 }
4911 
4912 #undef XTOP
4913 














4914 //------------------------copy_to_clone----------------------------------
4915 // Helper function for inline_native_clone.
4916 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
4917   assert(obj_size != nullptr, "");
4918   Node* raw_obj = alloc_obj->in(1);
4919   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4920 
4921   AllocateNode* alloc = nullptr;
4922   if (ReduceBulkZeroing) {
4923     // We will be completely responsible for initializing this object -
4924     // mark Initialize node as complete.
4925     alloc = AllocateNode::Ideal_allocation(alloc_obj);
4926     // The object was just allocated - there should be no stores!
4927     guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
4928     // Mark as complete_with_arraycopy so that on AllocateNode
4929     // expansion, we know this AllocateNode is initialized by an array
4930     // copy and a StoreStore barrier exists after the array copy.
4931     alloc->initialization()->set_complete_with_arraycopy();
4932   }
4933 

4958 //  not cloneable or finalizer => slow path to out-of-line Object.clone
4959 //
4960 // The general case has two steps, allocation and copying.
4961 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4962 //
4963 // Copying also has two cases, oop arrays and everything else.
4964 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4965 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4966 //
4967 // These steps fold up nicely if and when the cloned object's klass
4968 // can be sharply typed as an object array, a type array, or an instance.
4969 //
4970 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4971   PhiNode* result_val;
4972 
4973   // Set the reexecute bit for the interpreter to reexecute
4974   // the bytecode that invokes Object.clone if deoptimization happens.
4975   { PreserveReexecuteState preexecs(this);
4976     jvms()->set_should_reexecute(true);
4977 
4978     Node* obj = null_check_receiver();

4979     if (stopped())  return true;
4980 
4981     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4982 
4983     // If we are going to clone an instance, we need its exact type to
4984     // know the number and types of fields to convert the clone to
4985     // loads/stores. Maybe a speculative type can help us.
4986     if (!obj_type->klass_is_exact() &&
4987         obj_type->speculative_type() != nullptr &&
4988         obj_type->speculative_type()->is_instance_klass()) {

4989       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
4990       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
4991           !spec_ik->has_injected_fields()) {
4992         if (!obj_type->isa_instptr() ||
4993             obj_type->is_instptr()->instance_klass()->has_subklass()) {
4994           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
4995         }
4996       }
4997     }
4998 
4999     // Conservatively insert a memory barrier on all memory slices.
5000     // Do not let writes into the original float below the clone.
5001     insert_mem_bar(Op_MemBarCPUOrder);
5002 
5003     // paths into result_reg:
5004     enum {
5005       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
5006       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
5007       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
5008       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
5009       PATH_LIMIT
5010     };
5011     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5012     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5013     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
5014     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5015     record_for_igvn(result_reg);
5016 
5017     Node* obj_klass = load_object_klass(obj);





5018     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
5019     if (array_ctl != nullptr) {
5020       // It's an array.
5021       PreserveJVMState pjvms(this);
5022       set_control(array_ctl);
5023       Node* obj_length = load_array_length(obj);
5024       Node* obj_size  = nullptr;
5025       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true);
5026 
5027       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5028       if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5029         // If it is an oop array, it needs very special treatment,
5030         // because gc barriers are required when accessing the array.
5031         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5032         if (is_obja != nullptr) {
5033           PreserveJVMState pjvms2(this);
5034           set_control(is_obja);
5035           // Generate a direct call to the right arraycopy function(s).
5036           // Clones are always tightly coupled.
5037           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5038           ac->set_clone_oop_array();
5039           Node* n = _gvn.transform(ac);
5040           assert(n == ac, "cannot disappear");
5041           ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5042 
5043           result_reg->init_req(_objArray_path, control());
5044           result_val->init_req(_objArray_path, alloc_obj);
5045           result_i_o ->set_req(_objArray_path, i_o());
5046           result_mem ->set_req(_objArray_path, reset_memory());
5047         }
5048       }
5049       // Otherwise, there are no barriers to worry about.
5050       // (We can dispense with card marks if we know the allocation
5051       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
5052       //  causes the non-eden paths to take compensating steps to
5053       //  simulate a fresh allocation, so that no further
5054       //  card marks are required in compiled code to initialize
5055       //  the object.)
5056 
5057       if (!stopped()) {
5058         copy_to_clone(obj, alloc_obj, obj_size, true);
5059 
5060         // Present the results of the copy.
5061         result_reg->init_req(_array_path, control());
5062         result_val->init_req(_array_path, alloc_obj);
5063         result_i_o ->set_req(_array_path, i_o());
5064         result_mem ->set_req(_array_path, reset_memory());




































5065       }
5066     }
5067 
5068     // We only go to the instance fast case code if we pass a number of guards.
5069     // The paths which do not pass are accumulated in the slow_region.
5070     RegionNode* slow_region = new RegionNode(1);
5071     record_for_igvn(slow_region);
5072     if (!stopped()) {
5073       // It's an instance (we did array above).  Make the slow-path tests.
5074       // If this is a virtual call, we generate a funny guard.  We grab
5075       // the vtable entry corresponding to clone() from the target object.
5076       // If the target method which we are calling happens to be the
5077       // Object clone() method, we pass the guard.  We do not need this
5078       // guard for non-virtual calls; the callee is known to be the native
5079       // Object clone().
5080       if (is_virtual) {
5081         generate_virtual_guard(obj_klass, slow_region);
5082       }
5083 
5084       // The object must be easily cloneable and must not have a finalizer.
5085       // Both of these conditions may be checked in a single test.
5086       // We could optimize the test further, but we don't care.
5087       generate_access_flags_guard(obj_klass,
5088                                   // Test both conditions:
5089                                   JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
5090                                   // Must be cloneable but not finalizer:
5091                                   JVM_ACC_IS_CLONEABLE_FAST,

5221 // deoptimize. This is possible because tightly_coupled_allocation()
5222 // guarantees there's no observer of the allocated array at this point
5223 // and the control flow is simple enough.
5224 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5225                                                     int saved_reexecute_sp, uint new_idx) {
5226   if (saved_jvms_before_guards != nullptr && !stopped()) {
5227     replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5228 
5229     assert(alloc != nullptr, "only with a tightly coupled allocation");
5230     // restore JVM state to the state at the arraycopy
5231     saved_jvms_before_guards->map()->set_control(map()->control());
5232     assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5233     assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5234     // If we've improved the types of some nodes (null check) while
5235     // emitting the guards, propagate them to the current state
5236     map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5237     set_jvms(saved_jvms_before_guards);
5238     _reexecute_sp = saved_reexecute_sp;
5239 
5240     // Remove the allocation from above the guards
5241     CallProjections callprojs;
5242     alloc->extract_projections(&callprojs, true);
5243     InitializeNode* init = alloc->initialization();
5244     Node* alloc_mem = alloc->in(TypeFunc::Memory);
5245     C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5246     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5247 
5248     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5249     // the allocation (i.e. is only valid if the allocation succeeds):
5250     // 1) Replace the CastIINode with AllocateArrayNode's length here.
5251     // 2) Re-create the CastIINode once the allocation has moved (see below), at the end of this method.
5252     //
5253     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
5254     // a new separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy)
5255     Node* init_control = init->proj_out(TypeFunc::Control);
5256     Node* alloc_length = alloc->Ideal_length();
5257 #ifdef ASSERT
5258     Node* prev_cast = nullptr;
5259 #endif
5260     for (uint i = 0; i < init_control->outcnt(); i++) {
5261       Node* init_out = init_control->raw_out(i);
5262       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5263 #ifdef ASSERT
5264         if (prev_cast == nullptr) {
5265           prev_cast = init_out;
5266         } else {
5267           if (prev_cast->cmp(*init_out) == false) {
5268             prev_cast->dump();
5269             init_out->dump();
5270             assert(false, "not equal CastIINode");
5271           }
5272         }
5273 #endif
5274         C->gvn_replace_by(init_out, alloc_length);
5275       }
5276     }
5277     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5278 
5279     // move the allocation here (after the guards)
5280     _gvn.hash_delete(alloc);
5281     alloc->set_req(TypeFunc::Control, control());
5282     alloc->set_req(TypeFunc::I_O, i_o());
5283     Node *mem = reset_memory();
5284     set_all_memory(mem);
5285     alloc->set_req(TypeFunc::Memory, mem);
5286     set_control(init->proj_out_or_null(TypeFunc::Control));
5287     set_i_o(callprojs.fallthrough_ioproj);
5288 
5289     // Update memory as done in GraphKit::set_output_for_allocation()
5290     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5291     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5292     if (ary_type->isa_aryptr() && length_type != nullptr) {
5293       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5294     }
5295     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5296     int            elemidx  = C->get_alias_index(telemref);
5297     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5298     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5299 
5300     Node* allocx = _gvn.transform(alloc);
5301     assert(allocx == alloc, "where has the allocation gone?");
5302     assert(dest->is_CheckCastPP(), "not an allocation result?");
5303 
5304     _gvn.hash_delete(dest);
5305     dest->set_req(0, control());
5306     Node* destx = _gvn.transform(dest);
5307     assert(destx == dest, "where has the allocation result gone?");

5468         top_src  = src_type->isa_aryptr();
5469         has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5470         src_spec = true;
5471       }
5472       if (!has_dest) {
5473         dest = maybe_cast_profiled_obj(dest, dest_k, true);
5474         dest_type  = _gvn.type(dest);
5475         top_dest  = dest_type->isa_aryptr();
5476         has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5477         dest_spec = true;
5478       }
5479     }
5480   }
5481 
5482   if (has_src && has_dest && can_emit_guards) {
5483     BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5484     BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5485     if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5486     if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5487 
5488     if (src_elem == dest_elem && src_elem == T_OBJECT) {
5489       // If both arrays are object arrays then having the exact types
5490       // for both will remove the need for a subtype check at runtime
5491       // before the call and may make it possible to pick a faster copy
5492       // routine (without a subtype check on every element)
5493       // Do we have the exact type of src?
5494       bool could_have_src = src_spec;
5495       // Do we have the exact type of dest?
5496       bool could_have_dest = dest_spec;
5497       ciKlass* src_k = nullptr;
5498       ciKlass* dest_k = nullptr;
5499       if (!src_spec) {
5500         src_k = src_type->speculative_type_not_null();
5501         if (src_k != nullptr && src_k->is_array_klass()) {
5502           could_have_src = true;
5503         }
5504       }
5505       if (!dest_spec) {
5506         dest_k = dest_type->speculative_type_not_null();
5507         if (dest_k != nullptr && dest_k->is_array_klass()) {
5508           could_have_dest = true;
5509         }
5510       }
5511       if (could_have_src && could_have_dest) {
5512         // If we can have both exact types, emit the missing guards
5513         if (could_have_src && !src_spec) {
5514           src = maybe_cast_profiled_obj(src, src_k, true);


5515         }
5516         if (could_have_dest && !dest_spec) {
5517           dest = maybe_cast_profiled_obj(dest, dest_k, true);


5518         }
5519       }
5520     }
5521   }
5522 
5523   ciMethod* trap_method = method();
5524   int trap_bci = bci();
5525   if (saved_jvms_before_guards != nullptr) {
5526     trap_method = alloc->jvms()->method();
5527     trap_bci = alloc->jvms()->bci();
5528   }
5529 
5530   bool negative_length_guard_generated = false;
5531 
5532   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5533       can_emit_guards &&
5534       !src->is_top() && !dest->is_top()) {
5535     // validate arguments: enables transformation of the ArrayCopyNode
5536     validated = true;
5537 
5538     RegionNode* slow_region = new RegionNode(1);
5539     record_for_igvn(slow_region);
5540 
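         // Taken together, the guards below re-establish in compiled code the
         // argument validation that System.arraycopy performs at run time;
         // any failing check branches into slow_region, which ends in an
         // uncommon trap instead of the intrinsic copy.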
5541     // (1) src and dest are arrays.
5542     generate_non_array_guard(load_object_klass(src), slow_region);
5543     generate_non_array_guard(load_object_klass(dest), slow_region);
5544 
5545     // (2) src and dest arrays must have elements of the same BasicType
5546     // done at macro expansion or at Ideal transformation time
5547 
5548     // (4) src_offset must not be negative.
5549     generate_negative_guard(src_offset, slow_region);
5550 
5551     // (5) dest_offset must not be negative.
5552     generate_negative_guard(dest_offset, slow_region);
5553 
5554     // (7) src_offset + length must not exceed length of src.
5555     generate_limit_guard(src_offset, length,
5556                          load_array_length(src),
5557                          slow_region);
5558 
5559     // (8) dest_offset + length must not exceed length of dest.
5560     generate_limit_guard(dest_offset, length,
5561                          load_array_length(dest),
5562                          slow_region);
5563 
5564     // (6) length must not be negative.
5565     // This is also checked in generate_arraycopy() during macro expansion, but
5566     // we have to check it here as well, for the case where the ArrayCopyNode
5567     // is eliminated by Escape Analysis.
5568     if (EliminateAllocations) {
5569       generate_negative_guard(length, slow_region);
5570       negative_length_guard_generated = true;
5571     }
5572 
5573     // (9) each element of an oop array must be assignable
5574     Node* dest_klass = load_object_klass(dest);
5575     if (src != dest) {
5576       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);


5577 
5578       if (not_subtype_ctrl != top()) {
5579         PreserveJVMState pjvms(this);
5580         set_control(not_subtype_ctrl);
5581         uncommon_trap(Deoptimization::Reason_intrinsic,
5582                       Deoptimization::Action_make_not_entrant);
5583         assert(stopped(), "Should be stopped");






















5584       }
5585     }

5586     {
5587       PreserveJVMState pjvms(this);
5588       set_control(_gvn.transform(slow_region));
5589       uncommon_trap(Deoptimization::Reason_intrinsic,
5590                     Deoptimization::Action_make_not_entrant);
5591       assert(stopped(), "Should be stopped");
5592     }
5593 
5594     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5595     const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
5596     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5597     arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
5598   }
5599 
5600   if (stopped()) {
5601     return true;
5602   }
5603 
5604   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
5605                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
5606                                           // so the compiler has a chance to eliminate them: during macro expansion,
5607                                           // we have to set their control (CastPP nodes are eliminated).
5608                                           load_object_klass(src), load_object_klass(dest),
5609                                           load_array_length(src), load_array_length(dest));
5610 
5611   ac->set_arraycopy(validated);
5612 
5613   Node* n = _gvn.transform(ac);
5614   if (n == ac) {
5615     ac->connect_outputs(this);
5616   } else {

  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "ci/ciFlatArrayKlass.hpp"
  28 #include "ci/ciUtilities.inline.hpp"
  29 #include "classfile/vmIntrinsics.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compileLog.hpp"
  32 #include "gc/shared/barrierSet.hpp"
  33 #include "jfr/support/jfrIntrinsics.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "oops/klass.inline.hpp"
  36 #include "oops/objArrayKlass.hpp"
  37 #include "opto/addnode.hpp"
  38 #include "opto/arraycopynode.hpp"
  39 #include "opto/c2compiler.hpp"
  40 #include "opto/castnode.hpp"
  41 #include "opto/cfgnode.hpp"
  42 #include "opto/convertnode.hpp"
  43 #include "opto/countbitsnode.hpp"
  44 #include "opto/idealKit.hpp"
  45 #include "opto/library_call.hpp"
  46 #include "opto/mathexactnode.hpp"
  47 #include "opto/mulnode.hpp"

 306   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 307   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 308   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 309   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 310 
 311   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 312   case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);
 313 
 314   case vmIntrinsics::_vectorizedHashCode:       return inline_vectorizedHashCode();
 315 
 316   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 317   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 318   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 319   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 320 
 321   case vmIntrinsics::_compressStringC:
 322   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 323   case vmIntrinsics::_inflateStringC:
 324   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 325 
 326   case vmIntrinsics::_makePrivateBuffer:        return inline_unsafe_make_private_buffer();
 327   case vmIntrinsics::_finishPrivateBuffer:      return inline_unsafe_finish_private_buffer();
 328   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 329   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 330   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 331   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 332   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 333   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 334   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 335   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 336   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);
 337   case vmIntrinsics::_getValue:                 return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false, true);
 338 
 339   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 340   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 341   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 342   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 343   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 344   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 345   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 346   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 347   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);
 348   case vmIntrinsics::_putValue:                 return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false, true);
 349 
 350   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 351   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 352   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 353   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 354   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 355   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 356   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 357   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 358   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 359 
 360   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 361   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 362   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 363   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 364   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 365   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 366   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 367   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 368   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 492                                                                                          "notifyJvmtiEnd", false, true);
 493   case vmIntrinsics::_notifyJvmtiVThreadMount:   return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
 494                                                                                          "notifyJvmtiMount", false, false);
 495   case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
 496                                                                                          "notifyJvmtiUnmount", false, false);
 497   case vmIntrinsics::_notifyJvmtiVThreadHideFrames: return inline_native_notify_jvmti_hide();
 498 #endif
 499 
 500 #ifdef JFR_HAVE_INTRINSICS
 501   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
 502   case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
 503   case vmIntrinsics::_jvm_commit:               return inline_native_jvm_commit();
 504 #endif
 505   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 506   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 507   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 508   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 509   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 510   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 511   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 512   case vmIntrinsics::_isFlattenedArray:         return inline_unsafe_isFlattenedArray();
 513   case vmIntrinsics::_getLength:                return inline_native_getLength();
 514   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 515   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 516   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 517   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 518   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 519   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 520   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 521 
 522   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 523   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 524 
 525   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 526 
 527   case vmIntrinsics::_isInstance:
 528   case vmIntrinsics::_getModifiers:
 529   case vmIntrinsics::_isInterface:
 530   case vmIntrinsics::_isArray:
 531   case vmIntrinsics::_isPrimitive:
 532   case vmIntrinsics::_isHidden:
 533   case vmIntrinsics::_getSuperclass:
 534   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 535 
 536   case vmIntrinsics::_asPrimaryType:
 537   case vmIntrinsics::_asPrimaryTypeArg:
 538   case vmIntrinsics::_asValueType:
 539   case vmIntrinsics::_asValueTypeArg:           return inline_primitive_Class_conversion(intrinsic_id());
 540 
 541   case vmIntrinsics::_floatToRawIntBits:
 542   case vmIntrinsics::_floatToIntBits:
 543   case vmIntrinsics::_intBitsToFloat:
 544   case vmIntrinsics::_doubleToRawLongBits:
 545   case vmIntrinsics::_doubleToLongBits:
 546   case vmIntrinsics::_longBitsToDouble:
 547   case vmIntrinsics::_floatToFloat16:
 548   case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());
 549 
 550   case vmIntrinsics::_floatIsFinite:
 551   case vmIntrinsics::_floatIsInfinite:
 552   case vmIntrinsics::_doubleIsFinite:
 553   case vmIntrinsics::_doubleIsInfinite:         return inline_fp_range_check(intrinsic_id());
 554 
 555   case vmIntrinsics::_numberOfLeadingZeros_i:
 556   case vmIntrinsics::_numberOfLeadingZeros_l:
 557   case vmIntrinsics::_numberOfTrailingZeros_i:
 558   case vmIntrinsics::_numberOfTrailingZeros_l:
 559   case vmIntrinsics::_bitCount_i:
 560   case vmIntrinsics::_bitCount_l:

2195     case vmIntrinsics::_remainderUnsigned_l: {
2196       zero_check_long(argument(2));
2197       // Compile-time detection of division by zero
2198       if (stopped()) {
2199         return true; // keep the graph constructed so far
2200       }
2201       n = new UModLNode(control(), argument(0), argument(2));
2202       break;
2203     }
2204     default:  fatal_unexpected_iid(id);  break;
2205   }
2206   set_result(_gvn.transform(n));
2207   return true;
2208 }
2209 
2210 //----------------------------inline_unsafe_access----------------------------
2211 
2212 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2213   // Attempt to infer a sharper value type from the offset and base type.
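       // For example, an Unsafe getReference whose base/offset resolve to an
       // instance field declared as String can be typed as String rather than
       // plain Object, which helps downstream optimization.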
2214   ciKlass* sharpened_klass = nullptr;
2215   bool null_free = false;
2216 
2217   // See if it is an instance field, with an object type.
2218   if (alias_type->field() != nullptr) {
2219     if (alias_type->field()->type()->is_klass()) {
2220       sharpened_klass = alias_type->field()->type()->as_klass();
2221       null_free = alias_type->field()->is_null_free();
2222     }
2223   }
2224 
2225   const TypeOopPtr* result = nullptr;
2226   // See if it is a narrow oop array.
2227   if (adr_type->isa_aryptr()) {
2228     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2229       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2230       null_free = adr_type->is_aryptr()->is_null_free();
2231       if (elem_type != nullptr && elem_type->is_loaded()) {
2232         // Sharpen the value type.
2233         result = elem_type;
2234       }
2235     }
2236   }
2237 
2238   // The sharpened class might be unloaded if there is no class loader
2239   // constraint in place.
2240   if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2241     // Sharpen the value type.
2242     result = TypeOopPtr::make_from_klass(sharpened_klass);
2243     if (null_free) {
2244       result = result->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2245     }
2246   }
2247   if (result != nullptr) {
2248 #ifndef PRODUCT
2249     if (C->print_intrinsics() || C->print_inlining()) {
2250       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2251       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2252     }
2253 #endif
2254   }
2255   return result;
2256 }
2257 
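// Maps the access kind parsed from the intrinsic to a memory-ordering
// decorator: e.g. plain getInt is parsed with kind == Relaxed and becomes
// MO_UNORDERED, while getIntVolatile uses Volatile and becomes MO_SEQ_CST.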
2258 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2259   switch (kind) {
2260       case Relaxed:
2261         return MO_UNORDERED;
2262       case Opaque:
2263         return MO_RELAXED;
2264       case Acquire:
2265         return MO_ACQUIRE;
2266       case Release:
2267         return MO_RELEASE;
2268       case Volatile:
2269         return MO_SEQ_CST;
2270       default:
2271         ShouldNotReachHere();
2272         return 0;
2273   }
2274 }
2275 
2276 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned, const bool is_flat) {
2277   if (callee()->is_static())  return false;  // caller must have the capability!
2278   DecoratorSet decorators = C2_UNSAFE_ACCESS;
2279   guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2280   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2281   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2282 
2283   if (is_reference_type(type)) {
2284     decorators |= ON_UNKNOWN_OOP_REF;
2285   }
2286 
2287   if (unaligned) {
2288     decorators |= C2_UNALIGNED;
2289   }
2290 
2291 #ifndef PRODUCT
2292   {
2293     ResourceMark rm;
2294     // Check the signatures.
2295     ciSignature* sig = callee()->signature();
2296 #ifdef ASSERT
2297     if (!is_store) {
2298       // Object getReference(Object base, int/long offset), etc.
2299       BasicType rtype = sig->return_type()->basic_type();
2300       assert(rtype == type, "getter must return the expected value");
2301       assert(sig->count() == 2 || (is_flat && sig->count() == 3), "oop getter has 2 or 3 arguments");
2302       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2303       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2304     } else {
2305       // void putReference(Object base, int/long offset, Object x), etc.
2306       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2307       assert(sig->count() == 3 || (is_flat && sig->count() == 4), "oop putter has 3 arguments");
2308       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2309       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2310       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2311       assert(vtype == type, "putter must accept the expected value");
2312     }
2313 #endif // ASSERT
2314  }
2315 #endif //PRODUCT
2316 
2317   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2318 
2319   Node* receiver = argument(0);  // type: oop
2320 
2321   // Build address expression.
2322   Node* heap_base_oop = top();
2323 
2324   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2325   Node* base = argument(1);  // type: oop
2326   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2327   Node* offset = argument(2);  // type: long
2328   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2329   // to be plain byte offsets, which are also the same as those accepted
2330   // by oopDesc::field_addr.
2331   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2332          "fieldOffset must be byte-scaled");
2333 
2334   ciInlineKlass* inline_klass = nullptr;
2335   if (is_flat) {
2336     const TypeInstPtr* cls = _gvn.type(argument(4))->isa_instptr();
2337     if (cls == nullptr || cls->const_oop() == nullptr) {
2338       return false;
2339     }
2340     ciType* mirror_type = cls->const_oop()->as_instance()->java_mirror_type();
2341     if (!mirror_type->is_inlinetype()) {
2342       return false;
2343     }
2344     inline_klass = mirror_type->as_inline_klass();
2345   }
2346 
2347   if (base->is_InlineType()) {
2348     InlineTypeNode* vt = base->as_InlineType();
2349     if (is_store) {
2350       if (!vt->is_allocated(&_gvn)) {
2351         return false;
2352       }
2353       base = vt->get_oop();
2354     } else {
2355       if (offset->is_Con()) {
2356         long off = find_long_con(offset, 0);
2357         ciInlineKlass* vk = vt->type()->inline_klass();
2358         if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2359           return false;
2360         }
2361 
2362         ciField* field = vk->get_non_flat_field_by_offset(off);
2363         if (field != nullptr) {
2364           BasicType bt = type2field[field->type()->basic_type()];
2365           if (bt == T_ARRAY || bt == T_NARROWOOP) {
2366             bt = T_OBJECT;
2367           }
2368           if (bt == type && (!field->is_flat() || field->type() == inline_klass)) {
2369             Node* value = vt->field_value_by_offset(off, false);
2370             if (value->is_InlineType()) {
2371               value = value->as_InlineType()->adjust_scalarization_depth(this);
2372             }
2373             set_result(value);
2374             return true;
2375           }
2376         }
2377       }
2378       {
2379         // Re-execute the unsafe access if allocation triggers deoptimization.
2380         PreserveReexecuteState preexecs(this);
2381         jvms()->set_should_reexecute(true);
2382         vt = vt->buffer(this);
2383       }
2384       base = vt->get_oop();
2385     }
2386   }
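       // From here on, base is an ordinary heap oop: either the original
       // argument or the oop of the buffer allocated above for a scalarized
       // inline type.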
2387 
2388   // 32-bit machines ignore the high half!
2389   offset = ConvL2X(offset);
2390 
2391   // Save state and restore on bailout
2392   uint old_sp = sp();
2393   SafePointNode* old_map = clone_map();
2394 
2395   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2396 
2397   if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2398     if (type != T_OBJECT && (inline_klass == nullptr || !inline_klass->has_object_fields())) {
2399       decorators |= IN_NATIVE; // off-heap primitive access
2400     } else {
2401       set_map(old_map);
2402       set_sp(old_sp);
2403       return false; // off-heap oop accesses are not supported
2404     }
2405   } else {
2406     heap_base_oop = base; // on-heap or mixed access
2407   }
2408 
2409   // Can base be null? Otherwise, always on-heap access.
2410   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2411 
2412   if (!can_access_non_heap) {
2413     decorators |= IN_HEAP;
2414   }
2415 
2416   Node* val = is_store ? argument(4 + (is_flat ? 1 : 0)) : nullptr;
2417 
2418   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2419   if (adr_type == TypePtr::NULL_PTR) {
2420     set_map(old_map);
2421     set_sp(old_sp);
2422     return false; // off-heap access with zero address
2423   }
2424 
2425   // Try to categorize the address.
2426   Compile::AliasType* alias_type = C->alias_type(adr_type);
2427   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2428 
2429   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2430       alias_type->adr_type() == TypeAryPtr::RANGE) {
2431     set_map(old_map);
2432     set_sp(old_sp);
2433     return false; // not supported
2434   }
2435 
2436   bool mismatched = false;
2437   BasicType bt = T_ILLEGAL;
2438   ciField* field = nullptr;
2439   if (adr_type->isa_instptr()) {
2440     const TypeInstPtr* instptr = adr_type->is_instptr();
2441     ciInstanceKlass* k = instptr->instance_klass();
2442     int off = instptr->offset();
2443     if (instptr->const_oop() != nullptr &&
2444         k == ciEnv::current()->Class_klass() &&
2445         instptr->offset() >= (k->size_helper() * wordSize)) {
2446       k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2447       field = k->get_field_by_offset(off, true);
2448     } else {
2449       field = k->get_non_flat_field_by_offset(off);
2450     }
2451     if (field != nullptr) {
2452       bt = type2field[field->type()->basic_type()];
2453     }
2454     assert(bt == alias_type->basic_type() || is_flat, "should match");
2455   } else {
2456     bt = alias_type->basic_type();
2457   }
2458 
2459   if (bt != T_ILLEGAL) {
2460     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2461     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2462       // Alias type doesn't differentiate between byte[] and boolean[].
2463       // Use address type to get the element type.
2464       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2465     }
2466     if (is_reference_type(bt, true)) {
2467       // accessing an array field with getReference is not a mismatch
2468       bt = T_OBJECT;
2469     }
2470     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2471       // Don't intrinsify mismatched object accesses
2472       set_map(old_map);
2473       set_sp(old_sp);
2474       return false;
2475     }
2476     mismatched = (bt != type);
2477   } else if (alias_type->adr_type()->isa_oopptr()) {
2478     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2479   }
2480 
2481   if (is_flat) {
2482     if (adr_type->isa_instptr()) {
2483       if (field == nullptr || field->type() != inline_klass) {
2484         mismatched = true;
2485       }
2486     } else if (adr_type->isa_aryptr()) {
2487       const Type* elem = adr_type->is_aryptr()->elem();
2488       if (!adr_type->is_flat() || elem->inline_klass() != inline_klass) {
2489         mismatched = true;
2490       }
2491     } else {
2492       mismatched = true;
2493     }
2494     if (is_store) {
2495       const Type* val_t = _gvn.type(val);
2496       if (!val_t->is_inlinetypeptr() || val_t->inline_klass() != inline_klass) {
2497         set_map(old_map);
2498         set_sp(old_sp);
2499         return false;
2500       }
2501     }
2502   }
2503 
2504   destruct_map_clone(old_map);
2505   assert(!mismatched || is_flat || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2506 
2507   if (mismatched) {
2508     decorators |= C2_MISMATCHED;
2509   }
2510 
2511   // First guess at the value type.
2512   const Type *value_type = Type::get_const_basic_type(type);
2513 
2514   // Figure out the memory ordering.
2515   decorators |= mo_decorator_for_access_kind(kind);
2516 
2517   if (!is_store) {
2518     if (type == T_OBJECT && !is_flat) {
2519       const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2520       if (tjp != nullptr) {
2521         value_type = tjp;
2522       }
2523     }
2524   }
2525 
2526   receiver = null_check(receiver);
2527   if (stopped()) {
2528     return true;
2529   }
2530   // Heap pointers get a null-check from the interpreter,
2531   // as a courtesy.  However, this is not guaranteed by Unsafe,
2532   // and it is not possible to fully distinguish unintended nulls
2533   // from intended ones in this API.
2534 
2535   if (!is_store) {
2536     Node* p = nullptr;
2537     // Try to constant fold a load from a constant field
2538 
2539     if (heap_base_oop != top() && field != nullptr && field->is_constant() && !field->is_flat() && !mismatched) {
2540       // final or stable field
2541       p = make_constant_from_field(field, heap_base_oop);
2542     }
2543 
2544     if (p == nullptr) { // Could not constant fold the load
2545       if (is_flat) {
2546         if (adr_type->isa_instptr() && !mismatched) {
2547           ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2548           int offset = adr_type->is_instptr()->offset();
2549           p = InlineTypeNode::make_from_flat(this, inline_klass, base, base, holder, offset, decorators);
2550         } else {
2551           p = InlineTypeNode::make_from_flat(this, inline_klass, base, adr, nullptr, 0, decorators);
2552         }
2553       } else {
2554         p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2555         const TypeOopPtr* ptr = value_type->make_oopptr();
2556         if (ptr != nullptr && ptr->is_inlinetypeptr()) {
2557           // Load a non-flattened inline type from memory
2558           p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass(), !ptr->maybe_null());
2559         }
2560       }
2561       // Normalize the value returned by getBoolean in the following cases
2562       if (type == T_BOOLEAN &&
2563           (mismatched ||
2564            heap_base_oop == top() ||                  // - heap_base_oop is null or
2565            (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2566                                                       //   and the unsafe access is made to large offset
2567                                                       //   (i.e., larger than the maximum offset necessary for any
2568                                                       //   field access)
2569             ) {
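               // The IdealKit sequence below is, in effect,
               //   p = (p != 0) ? 1 : 0;
               // yielding a canonical Java boolean even if the underlying
               // byte held an out-of-range value.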
2570           IdealKit ideal = IdealKit(this);
2571 #define __ ideal.
2572           IdealVariable normalized_result(ideal);
2573           __ declarations_done();
2574           __ set(normalized_result, p);
2575           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2576           __ set(normalized_result, ideal.ConI(1));
2577           ideal.end_if();
2578           final_sync(ideal);
2579           p = __ value(normalized_result);
2580 #undef __
2581       }
2582     }
2583     if (type == T_ADDRESS) {
2584       p = gvn().transform(new CastP2XNode(nullptr, p));
2585       p = ConvX2UL(p);
2586     }
2587     // The load node has the control of the preceding MemBarCPUOrder.  All
2588     // following nodes will have the control of the MemBarCPUOrder inserted at
2589     // the end of this method.  So, pushing the load onto the stack at a later
2590     // point is fine.
2591     set_result(p);
2592   } else {
2593     if (bt == T_ADDRESS) {
2594       // Repackage the long as a pointer.
2595       val = ConvL2X(val);
2596       val = gvn().transform(new CastX2PNode(val));
2597     }
2598     if (is_flat) {
2599       if (adr_type->isa_instptr() && !mismatched) {
2600         ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2601         int offset = adr_type->is_instptr()->offset();
2602         val->as_InlineType()->store_flat(this, base, base, holder, offset, decorators);
2603       } else {
2604         val->as_InlineType()->store_flat(this, base, adr, nullptr, 0, decorators);
2605       }
2606     } else {
2607       access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2608     }
2609   }
2610 
2611   if (argument(1)->is_InlineType() && is_store) {
2612     InlineTypeNode* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(argument(1))->inline_klass());
2613     value = value->make_larval(this, false);
2614     replace_in_map(argument(1), value);
2615   }
2616 
2617   return true;
2618 }
2619 
2620 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2621   Node* receiver = argument(0);
2622   Node* value = argument(1);
2623   if (!value->is_InlineType()) {
2624     return false;
2625   }
2626 
2627   receiver = null_check(receiver);
2628   if (stopped()) {
2629     return true;
2630   }
2631 
2632   set_result(value->as_InlineType()->make_larval(this, true));
2633   return true;
2634 }
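// The larval returned above is a private, mutable buffer of the value; the
// caller is expected to write its fields via Unsafe and then call
// finishPrivateBuffer (below) to obtain the finished value.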
2635 
2636 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2637   Node* receiver = argument(0);
2638   Node* buffer = argument(1);
2639   if (!buffer->is_InlineType()) {
2640     return false;
2641   }
2642   InlineTypeNode* vt = buffer->as_InlineType();
2643   if (!vt->is_allocated(&_gvn)) {
2644     return false;
2645   }
2646   // TODO 8239003 Why is this needed?
2647   if (AllocateNode::Ideal_allocation(vt->get_oop()) == nullptr) {
2648     return false;
2649   }
2650 
2651   receiver = null_check(receiver);
2652   if (stopped()) {
2653     return true;
2654   }
2655 
2656   set_result(vt->finish_larval(this));
2657   return true;
2658 }
2659 
2660 //----------------------------inline_unsafe_load_store----------------------------
2661 // This method serves a couple of different customers (depending on LoadStoreKind):
2662 //
2663 // LS_cmp_swap:
2664 //
2665 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2666 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2667 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2668 //
2669 // LS_cmp_swap_weak:
2670 //
2671 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2672 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2673 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2674 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2675 //
2676 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);

2845     }
2846     case LS_cmp_swap:
2847     case LS_cmp_swap_weak:
2848     case LS_get_add:
2849       break;
2850     default:
2851       ShouldNotReachHere();
2852   }
2853 
2854   // Null check receiver.
2855   receiver = null_check(receiver);
2856   if (stopped()) {
2857     return true;
2858   }
2859 
2860   int alias_idx = C->get_alias_index(adr_type);
2861 
2862   if (is_reference_type(type)) {
2863     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2864 
2865     if (oldval != nullptr && oldval->is_InlineType()) {
2866       // Re-execute the unsafe access if allocation triggers deoptimization.
2867       PreserveReexecuteState preexecs(this);
2868       jvms()->set_should_reexecute(true);
2869       oldval = oldval->as_InlineType()->buffer(this)->get_oop();
2870     }
2871     if (newval != nullptr && newval->is_InlineType()) {
2872       // Re-execute the unsafe access if allocation triggers deoptimization.
2873       PreserveReexecuteState preexecs(this);
2874       jvms()->set_should_reexecute(true);
2875       newval = newval->as_InlineType()->buffer(this)->get_oop();
2876     }
2877 
2878     // Transformation of a value that could be a null pointer (CastPP #null)
2879     // may have been delayed during parsing (for example, in adjust_map_after_if()).
2880     // Execute the transformation here to avoid barrier generation in such a case.
2881     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2882       newval = _gvn.makecon(TypePtr::NULL_PTR);
2883 
2884     if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2885       // Refine the value to a null constant, when it is known to be null
2886       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2887     }
2888   }
2889 
2890   Node* result = nullptr;
2891   switch (kind) {
2892     case LS_cmp_exchange: {
2893       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2894                                             oldval, newval, value_type, type, decorators);
2895       break;
2896     }
2897     case LS_cmp_swap_weak:

3044                     Deoptimization::Action_make_not_entrant);
3045     }
3046     if (stopped()) {
3047       return true;
3048     }
3049 #endif //INCLUDE_JVMTI
3050 
3051   Node* test = nullptr;
3052   if (LibraryCallKit::klass_needs_init_guard(kls)) {
3053     // Note:  The argument might still be an illegal value like
3054     // Serializable.class or Object[].class.   The runtime will handle it.
3055     // But we must make an explicit check for initialization.
3056     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3057     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3058     // can generate code to load it as unsigned byte.
3059     Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
3060     Node* bits = intcon(InstanceKlass::fully_initialized);
3061     test = _gvn.transform(new SubINode(inst, bits));
3062     // The 'test' is non-zero if we need to take a slow path.
3063   }
3064   Node* obj = nullptr;
3065   const TypeInstKlassPtr* tkls = _gvn.type(kls)->isa_instklassptr();
3066   if (tkls != nullptr && tkls->instance_klass()->is_inlinetype()) {
3067     obj = InlineTypeNode::make_default(_gvn, tkls->instance_klass()->as_inline_klass())->buffer(this);
3068   } else {
3069     obj = new_instance(kls, test);
3070   }
3071   set_result(obj);
3072   return true;
3073 }
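     // For reference, the init-state guard above reduces to the following
     // check (a sketch in C terms; field and constant names as in
     // InstanceKlass):
     //
     //   if (kls->_init_state - InstanceKlass::fully_initialized != 0) {
     //     // slow path: the class may still need initialization
     //   }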
3074 
3075 //------------------------inline_native_time_funcs--------------
3076 // inline code for System.currentTimeMillis() and System.nanoTime()
3077 // these have the same type and signature
3078 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3079   const TypeFunc* tf = OptoRuntime::void_long_Type();
3080   const TypePtr* no_memory_effects = nullptr;
3081   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3082   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3083 #ifdef ASSERT
3084   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3085   assert(value_top == top(), "second value must be top");
3086 #endif
3087   set_result(value);
3088   return true;
3089 }
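     // Shape of the graph emitted above (an informal sketch, not a literal
     // IR dump; funcAddr is the millis or nanos leaf entry point passed in):
     //
     //   time  = CallLeaf(funcAddr)    // RC_LEAF, no memory effects
     //   value = Proj(time, Parms+0)   // the long result
     //   top   = Proj(time, Parms+1)   // second half of the long pair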
3090 

3805 
3806 //------------------------inline_native_setCurrentThread------------------
3807 bool LibraryCallKit::inline_native_setCurrentThread() {
3808   assert(C->method()->changes_current_thread(),
3809          "method changes current Thread but is not annotated ChangesCurrentThread");
3810   Node* thread_obj = argument(1);
3811   Node* thread = _gvn.transform(new ThreadLocalNode());
3812   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3813   Node* thread_obj_handle
3814     = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3815   thread_obj_handle = _gvn.transform(thread_obj_handle);
3816   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3817   access_store_at(nullptr, thread_obj_handle, adr_type, thread_obj, _gvn.type(thread_obj), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3818   JFR_ONLY(extend_setCurrentThread(thread, thread_obj);)
3819   return true;
3820 }
3821 
3822 const Type* LibraryCallKit::scopedValueCache_type() {
3823   ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3824   const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3825   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS, /* stable= */ false, /* flat= */ false, /* not_flat= */ true, /* not_null_free= */ true);
3826 
3827   // Because we create the scopedValue cache lazily, we have to make the
3828   // type of the result BotPTR.
3829   bool xk = etype->klass_is_exact();
3830   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, TypeAryPtr::Offset(0));
3831   return objects_type;
3832 }
3833 
3834 Node* LibraryCallKit::scopedValueCache_helper() {
3835   Node* thread = _gvn.transform(new ThreadLocalNode());
3836   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3837   // We cannot use immutable_memory() because we might flip onto a
3838   // different carrier thread, at which point we'll need to use that
3839   // carrier thread's cache.
3840   // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3841   //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3842   return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3843 }
3844 
3845 //------------------------inline_native_scopedValueCache------------------
3846 bool LibraryCallKit::inline_native_scopedValueCache() {
3847   Node* cache_obj_handle = scopedValueCache_helper();
3848   const Type* objects_type = scopedValueCache_type();
3849   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3850 
3851   return true;
3852 }
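     // The load above goes through a double indirection (a hedged sketch in
     // C terms; the thread-local field name is illustrative):
     //
     //   JavaThread* t = Thread::current();
     //   oop* handle   = t->_scopedValueCache;  // raw T_ADDRESS load in the helper
     //   oop  cache    = *handle;               // the IN_NATIVE access_load above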
3853 
3854 //------------------------inline_native_setScopedValueCache------------------
3855 bool LibraryCallKit::inline_native_setScopedValueCache() {
3856   Node* arr = argument(0);
3857   Node* cache_obj_handle = scopedValueCache_helper();
3858   const Type* objects_type = scopedValueCache_type();
3859 
3860   const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3861   access_store_at(nullptr, cache_obj_handle, adr_type, arr, objects_type, T_OBJECT, IN_NATIVE | MO_UNORDERED);
3862 
3863   return true;
3864 }
3865 
3866 //-----------------------load_klass_from_mirror_common-------------------------
3867 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3868 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3869 // and branch to the given path on the region.
3870 // If never_see_null, take an uncommon trap on null, so we can optimistically
3871 // compile for the non-null case.
3872 // If the region is null, force never_see_null = true.
3873 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3874                                                     bool never_see_null,
3875                                                     RegionNode* region,
3876                                                     int null_path,
3877                                                     int offset) {
3878   if (region == nullptr)  never_see_null = true;
3879   Node* p = basic_plus_adr(mirror, offset);
3880   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3881   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3882   Node* null_ctl = top();
3883   kls = null_check_oop(kls, &null_ctl, never_see_null);
3884   if (region != nullptr) {
3885     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

3888     assert(null_ctl == top(), "no loose ends");
3889   }
3890   return kls;
3891 }
3892 
3893 //--------------------(inline_native_Class_query helpers)---------------------
3894 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3895 // Fall through if (mods & mask) == bits, take the guard otherwise.
3896 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3897   // Branch around if the given klass has the given modifier bit set.
3898   // Like generate_guard, adds a new path onto the region.
3899   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3900   Node* mods = make_load(nullptr, modp, TypeInt::INT, T_INT, MemNode::unordered);
3901   Node* mask = intcon(modifier_mask);
3902   Node* bits = intcon(modifier_bits);
3903   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3904   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3905   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3906   return generate_fair_guard(bol, region);
3907 }
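     // Example (see the wrappers below): generate_interface_guard passes
     // mask = JVM_ACC_INTERFACE and bits = 0, so control falls through for
     // ordinary classes ((mods & JVM_ACC_INTERFACE) == 0) and branches onto
     // 'region' when the klass is an interface.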
3908 
3909 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3910   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3911 }
3912 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3913   return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3914 }
3915 
3916 //-------------------------inline_native_Class_query-------------------
3917 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3918   const Type* return_type = TypeInt::BOOL;
3919   Node* prim_return_value = top();  // what happens if it's a primitive class?
3920   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3921   bool expect_prim = false;     // most of these queries expect to work on refs
3922 
3923   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3924 
3925   Node* mirror = argument(0);
3926   Node* obj    = top();
3927 
3928   switch (id) {

4082 
4083   case vmIntrinsics::_getClassAccessFlags:
4084     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
4085     query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
4086     break;
4087 
4088   default:
4089     fatal_unexpected_iid(id);
4090     break;
4091   }
4092 
4093   // Fall-through is the normal case of a query to a real class.
4094   phi->init_req(1, query_value);
4095   region->init_req(1, control());
4096 
4097   C->set_has_split_ifs(true); // Has chance for split-if optimization
4098   set_result(region, phi);
4099   return true;
4100 }
4101 
4102 //-------------------------inline_primitive_Class_conversion-------------------
4103 //               Class<T> java.lang.Class                  .asPrimaryType()
4104 // public static Class<T> jdk.internal.value.PrimitiveClass.asPrimaryType(Class<T>)
4105 //               Class<T> java.lang.Class                  .asValueType()
4106 // public static Class<T> jdk.internal.value.PrimitiveClass.asValueType(Class<T>)
4107 bool LibraryCallKit::inline_primitive_Class_conversion(vmIntrinsics::ID id) {
4108   Node* mirror = argument(0); // Receiver/argument Class
4109   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4110   if (mirror_con == nullptr) {
4111     return false;
4112   }
4113 
4114   bool is_val_mirror = true;
4115   ciType* tm = mirror_con->java_mirror_type(&is_val_mirror);
4116   if (tm != nullptr) {
4117     Node* result = mirror;
4118     if ((id == vmIntrinsics::_asPrimaryType || id == vmIntrinsics::_asPrimaryTypeArg) && is_val_mirror) {
4119       result = _gvn.makecon(TypeInstPtr::make(tm->as_inline_klass()->ref_mirror()));
4120     } else if (id == vmIntrinsics::_asValueType || id == vmIntrinsics::_asValueTypeArg) {
4121       if (!tm->is_inlinetype()) {
4122         return false; // Throw UnsupportedOperationException
4123       } else if (!is_val_mirror) {
4124         result = _gvn.makecon(TypeInstPtr::make(tm->as_inline_klass()->val_mirror()));
4125       }
4126     }
4127     set_result(result);
4128     return true;
4129   }
4130   return false;
4131 }
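     // Folding examples (a sketch; only compile-time constant mirrors fold):
     //
     //   asPrimaryType(MyValue val mirror)  ==> constant MyValue ref mirror
     //   asValueType(MyValue ref mirror)    ==> constant MyValue val mirror
     //   asValueType(String.class)          ==> bail out; the interpreter throws
     //                                          UnsupportedOperationException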
4132 
4133 //-------------------------inline_Class_cast-------------------
4134 bool LibraryCallKit::inline_Class_cast() {
4135   Node* mirror = argument(0); // Class
4136   Node* obj    = argument(1);
4137   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4138   if (mirror_con == nullptr) {
4139     return false;  // dead path (mirror->is_top()).
4140   }
4141   if (obj == nullptr || obj->is_top()) {
4142     return false;  // dead path
4143   }
4144   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4145 
4146   // First, see if Class.cast() can be folded statically.
4147   // java_mirror_type() returns non-null for compile-time Class constants.
4148   bool requires_null_check = false;
4149   ciType* tm = mirror_con->java_mirror_type(&requires_null_check);
4150   if (tm != nullptr && tm->is_klass() &&
4151       tp != nullptr) {
4152     if (!tp->is_loaded()) {
4153       // Don't use intrinsic when class is not loaded.
4154       return false;
4155     } else {
4156       int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());
4157       if (static_res == Compile::SSC_always_true) {
4158         // isInstance() is true - fold the code.
4159         if (requires_null_check) {
4160           obj = null_check(obj);
4161         }
4162         set_result(obj);
4163         return true;
4164       } else if (static_res == Compile::SSC_always_false) {
4165         // Don't use intrinsic, have to throw ClassCastException.
4166         // If the reference is null, the non-intrinsic bytecode will
4167         // be optimized appropriately.
4168         return false;
4169       }
4170     }
4171   }
4172 
4173   // Bail out of the intrinsic and do normal inlining if the exception path is frequent.
4174   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4175     return false;
4176   }
4177 
4178   // Generate dynamic checks.
4179   // Class.cast() is java implementation of _checkcast bytecode.
4180   // Do checkcast (Parse::do_checkcast()) optimizations here.
4181 
4182   if (requires_null_check) {
4183     obj = null_check(obj);
4184   }
4185   mirror = null_check(mirror);
4186   // If mirror is dead, only null-path is taken.
4187   if (stopped()) {
4188     return true;
4189   }
4190 
4191   // Not a subtype, or the mirror's klass ptr is nullptr (in case it is a primitive).
4192   enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
4193   RegionNode* region = new RegionNode(PATH_LIMIT);
4194   record_for_igvn(region);
4195 
4196   // Now load the mirror's klass metaobject, and null-check it.
4197   // If kls is null, we have a primitive mirror and
4198   // nothing is an instance of a primitive type.
4199   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4200 
4201   Node* res = top();
4202   Node* io = i_o();
4203   Node* mem = merged_memory();
4204   if (!stopped()) {
4205     if (EnableValhalla && !requires_null_check) {
4206       // Check if we are casting to QMyValue
4207       Node* ctrl_val_mirror = generate_fair_guard(is_val_mirror(mirror), nullptr);
4208       if (ctrl_val_mirror != nullptr) {
4209         RegionNode* r = new RegionNode(3);
4210         record_for_igvn(r);
4211         r->init_req(1, control());
4212 
4213         // Casting to QMyValue, check for null
4214         set_control(ctrl_val_mirror);
4215         { // PreserveJVMState because null check replaces obj in map
4216           PreserveJVMState pjvms(this);
4217           Node* null_ctr = top();
4218           null_check_oop(obj, &null_ctr);
4219           region->init_req(_npe_path, null_ctr);
4220           r->init_req(2, control());
4221         }
4222         set_control(_gvn.transform(r));
4223       }
4224     }
4225 
4226     Node* bad_type_ctrl = top();
4227     // Do checkcast optimizations.
4228     res = gen_checkcast(obj, kls, &bad_type_ctrl);
4229     region->init_req(_bad_type_path, bad_type_ctrl);
4230   }
4231   if (region->in(_prim_path) != top() ||
4232       region->in(_bad_type_path) != top() ||
4233       region->in(_npe_path) != top()) {
4234     // Let Interpreter throw ClassCastException.
4235     PreserveJVMState pjvms(this);
4236     set_control(_gvn.transform(region));
4237     // Set IO and memory because gen_checkcast may override them when buffering inline types
4238     set_i_o(io);
4239     set_all_memory(mem);
4240     uncommon_trap(Deoptimization::Reason_intrinsic,
4241                   Deoptimization::Action_maybe_recompile);
4242   }
4243   if (!stopped()) {
4244     set_result(res);
4245   }
4246   return true;
4247 }
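     // Static folding examples for the code above (illustrative; assumes the
     // mirror is a compile-time constant):
     //
     //   Number.class.cast(anInteger)  // SSC_always_true:  folds to the argument
     //   String.class.cast(anInteger)  // SSC_always_false: no intrinsic, the
     //                                 // bytecode throws ClassCastException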
4248 
4249 
4250 //--------------------------inline_native_subtype_check------------------------
4251 // This intrinsic takes the JNI calls out of the heart of
4252 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4253 bool LibraryCallKit::inline_native_subtype_check() {
4254   // Pull both arguments off the stack.
4255   Node* args[2];                // two java.lang.Class mirrors: superc, subc
4256   args[0] = argument(0);
4257   args[1] = argument(1);
4258   Node* klasses[2];             // corresponding Klasses: superk, subk
4259   klasses[0] = klasses[1] = top();
4260 
4261   enum {
4262     // A full decision tree on {superc is prim, subc is prim}:
4263     _prim_0_path = 1,           // {P,N} => false
4264                                 // {P,P} & superc!=subc => false
4265     _prim_same_path,            // {P,P} & superc==subc => true
4266     _prim_1_path,               // {N,P} => false
4267     _ref_subtype_path,          // {N,N} & subtype check wins => true
4268     _both_ref_path,             // {N,N} & subtype check loses => false
4269     PATH_LIMIT
4270   };
4271 
4272   RegionNode* region = new RegionNode(PATH_LIMIT);
4273   RegionNode* prim_region = new RegionNode(2);
4274   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
4275   record_for_igvn(region);
4276   record_for_igvn(prim_region);
4277 
4278   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
4279   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4280   int class_klass_offset = java_lang_Class::klass_offset();
4281 
4282   // First null-check both mirrors and load each mirror's klass metaobject.
4283   int which_arg;
4284   for (which_arg = 0; which_arg <= 1; which_arg++) {
4285     Node* arg = args[which_arg];
4286     arg = null_check(arg);
4287     if (stopped())  break;
4288     args[which_arg] = arg;
4289 
4290     Node* p = basic_plus_adr(arg, class_klass_offset);
4291     Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
4292     klasses[which_arg] = _gvn.transform(kls);
4293   }
4294 
4295   // Having loaded both klasses, test each for null.
4296   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4297   for (which_arg = 0; which_arg <= 1; which_arg++) {
4298     Node* kls = klasses[which_arg];
4299     Node* null_ctl = top();
4300     kls = null_check_oop(kls, &null_ctl, never_see_null);
4301     if (which_arg == 0) {
4302       prim_region->init_req(1, null_ctl);
4303     } else {
4304       region->init_req(_prim_1_path, null_ctl);
4305     }
4306     if (stopped())  break;
4307     klasses[which_arg] = kls;
4308   }
4309 
4310   if (!stopped()) {
4311     // now we have two reference types, in klasses[0..1]
4312     Node* subk   = klasses[1];  // the argument to isAssignableFrom
4313     Node* superk = klasses[0];  // the receiver
4314     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4315     // If superc is an inline mirror, we also need to check superc == subc explicitly: LMyValue
4316     // is not a subtype of QMyValue, yet with subk == superk the subtype check above would pass.
4317     generate_fair_guard(is_val_mirror(args[0]), prim_region);
4318     // now we have a successful reference subtype check
4319     region->set_req(_ref_subtype_path, control());
4320   }
4321 
4322   // If both operands are primitive (both klasses null), then
4323   // we must return true when they are identical primitives.
4324   // It is convenient to test this after the first null klass check.
4325   // This path is also used if superc is a value mirror.
4326   set_control(_gvn.transform(prim_region));
4327   if (!stopped()) {
4328     // Since superc is primitive, make a guard for the superc==subc case.
4329     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4330     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4331     generate_fair_guard(bol_eq, region);
4332     if (region->req() == PATH_LIMIT+1) {
4333       // A guard was added.  If the added guard is taken, superc==subc.
4334       region->swap_edges(PATH_LIMIT, _prim_same_path);
4335       region->del_req(PATH_LIMIT);
4336     }
4337     region->set_req(_prim_0_path, control()); // Not equal after all.
4338   }
4339 
4340   // these are the only paths that produce 'true':
4341   phi->set_req(_prim_same_path,   intcon(1));
4342   phi->set_req(_ref_subtype_path, intcon(1));
4343 
4344   // pull together the cases:
4345   assert(region->req() == PATH_LIMIT, "sane region");
4346   for (uint i = 1; i < region->req(); i++) {
4347     Node* ctl = region->in(i);
4348     if (ctl == nullptr || ctl == top()) {
4349       region->set_req(i, top());
4350       phi   ->set_req(i, top());
4351     } else if (phi->in(i) == nullptr) {
4352       phi->set_req(i, intcon(0)); // all other paths produce 'false'
4353     }
4354   }
4355 
4356   set_control(_gvn.transform(region));
4357   set_result(_gvn.transform(phi));
4358   return true;
4359 }
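     // Concrete instances of the decision tree above (illustrative):
     //
     //   int.class.isAssignableFrom(int.class)         // _prim_same_path   => true
     //   int.class.isAssignableFrom(String.class)      // _prim_0_path      => false
     //   Object.class.isAssignableFrom(int.class)      // _prim_1_path      => false
     //   Number.class.isAssignableFrom(Integer.class)  // _ref_subtype_path => true
     //   String.class.isAssignableFrom(Object.class)   // _both_ref_path    => false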
4360 
4361 //---------------------generate_array_guard_common------------------------
4362 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {
4363 
4364   if (stopped()) {
4365     return nullptr;
4366   }
4367 
4368   // Like generate_guard, adds a new path onto the region.
4369   jint  layout_con = 0;
4370   Node* layout_val = get_layout_helper(kls, layout_con);
4371   if (layout_val == nullptr) {
4372     bool query = false;
4373     switch(kind) {
4374       case ObjectArray:    query = Klass::layout_helper_is_objArray(layout_con); break;
4375       case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
4376       case TypeArray:      query = Klass::layout_helper_is_typeArray(layout_con); break;
4377       case AnyArray:       query = Klass::layout_helper_is_array(layout_con); break;
4378       case NonArray:       query = !Klass::layout_helper_is_array(layout_con); break;
4379       default:
4380         ShouldNotReachHere();
4381     }
4382     if (!query) {
4383       return nullptr;                       // never a branch
4384     } else {                             // always a branch
4385       Node* always_branch = control();
4386       if (region != nullptr)
4387         region->add_req(always_branch);
4388       set_control(top());
4389       return always_branch;
4390     }
4391   }
4392   unsigned int value = 0;
4393   BoolTest::mask btest = BoolTest::illegal;
4394   switch(kind) {
4395     case ObjectArray:
4396     case NonObjectArray: {
4397       value = Klass::_lh_array_tag_obj_value;
4398       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4399       btest = (kind == ObjectArray) ? BoolTest::eq : BoolTest::ne;
4400       break;
4401     }
4402     case TypeArray: {
4403       value = Klass::_lh_array_tag_type_value;
4404       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4405       btest = BoolTest::eq;
4406       break;
4407     }
4408     case AnyArray:    value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
4409     case NonArray:    value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
4410     default:
4411       ShouldNotReachHere();
4412   }
4413   // Now test the correct condition.
4414   jint nval = (jint)value;
4415   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4416   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4417   return generate_fair_guard(bol, region);
4418 }
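     // Layout-helper encoding relied on above (a sketch; see Klass::_layout_helper):
     // array tags live in the high bits and make the helper negative, so
     //
     //   (lh >> _lh_array_tag_shift) == _lh_array_tag_obj_value   // object array
     //   (lh >> _lh_array_tag_shift) == _lh_array_tag_type_value  // primitive array
     //   lh <  _lh_neutral_value                                  // some kind of array
     //   lh >  _lh_neutral_value                                  // an instance, not an array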
4419 
4420 
4421 //-----------------------inline_native_newArray--------------------------
4422 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4423 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4424 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4425   Node* mirror;
4426   Node* count_val;
4427   if (uninitialized) {
4428     null_check_receiver();
4429     mirror    = argument(1);
4430     count_val = argument(2);
4431   } else {
4432     mirror    = argument(0);
4433     count_val = argument(1);
4434   }
4435 
4436   mirror = null_check(mirror);
4437   // If mirror is dead, only null-path is taken.
4438   if (stopped())  return true;
4439 
4440   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4441   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4442   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);

4548   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4549   { PreserveReexecuteState preexecs(this);
4550     jvms()->set_should_reexecute(true);
4551 
4552     array_type_mirror = null_check(array_type_mirror);
4553     original          = null_check(original);
4554 
4555     // Check if a null path was taken unconditionally.
4556     if (stopped())  return true;
4557 
4558     Node* orig_length = load_array_length(original);
4559 
4560     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4561     klass_node = null_check(klass_node);
4562 
4563     RegionNode* bailout = new RegionNode(1);
4564     record_for_igvn(bailout);
4565 
4566     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4567     // Bail out if that is so.
4568     // An inline type array may have an object field that would require a
4569     // write barrier. Conservatively, go to the slow path.
4570     // TODO 8251971: Optimize for the case when flat src/dst are later found
4571     // to not contain oops (i.e., move this check to the macro expansion phase).
4572     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4573     const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
4574     const TypeKlassPtr* tklass = _gvn.type(klass_node)->is_klassptr();
4575     bool exclude_flat = UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
4576                         // Can src array be flat and contain oops?
4577                         (orig_t == nullptr || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
4578                         // Can dest array be flat and contain oops?
4579                         tklass->can_be_inline_array() && (!tklass->is_flat() || tklass->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->as_inline_klass()->contains_oops());
4580     Node* not_objArray = exclude_flat ? generate_non_objArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
4581     if (not_objArray != nullptr) {
4582       // Improve the klass node's type from the new optimistic assumption:
4583       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4584       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
4585       Node* cast = new CastPPNode(klass_node, akls);
4586       cast->init_req(0, control());
4587       klass_node = _gvn.transform(cast);
4588     }
4589 
4590     // Bail out if either start or end is negative.
4591     generate_negative_guard(start, bailout, &start);
4592     generate_negative_guard(end,   bailout, &end);
4593 
4594     Node* length = end;
4595     if (_gvn.type(start) != TypeInt::ZERO) {
4596       length = _gvn.transform(new SubINode(end, start));
4597     }
4598 
4599     // Bail out if length is negative.
4600     // Without this, new_array would throw
4601     // NegativeArraySizeException, but IllegalArgumentException is what
4602     // should be thrown.
4603     generate_negative_guard(length, bailout, &length);
4604 
4605     // Handle inline type arrays
4606     bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
4607     if (!stopped()) {
4608       orig_t = _gvn.type(original)->isa_aryptr();
4609       if (orig_t != nullptr && orig_t->is_flat()) {
4610         // Src is flat, check that dest is flat as well
4611         if (exclude_flat) {
4612           // Dest can't be flat, bail out
4613           bailout->add_req(control());
4614           set_control(top());
4615         } else {
4616           generate_fair_guard(flat_array_test(klass_node, /* flat = */ false), bailout);
4617         }
4618       } else if (UseFlatArray && (orig_t == nullptr || !orig_t->is_not_flat()) &&
4619                  // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
4620                  ((!tklass->is_flat() && tklass->can_be_inline_array()) || !can_validate)) {
4621         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
4622         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
4623         generate_fair_guard(flat_array_test(load_object_klass(original)), bailout);
4624         if (orig_t != nullptr) {
4625           orig_t = orig_t->cast_to_not_flat();
4626           original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
4627         }
4628       }
4629       if (!can_validate) {
4630         // No validation. The subtype check emitted at macro expansion time will not go to the slow
4631         // path but call checkcast_arraycopy, which cannot handle flat/null-free inline type arrays.
4632         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
4633         generate_fair_guard(null_free_array_test(klass_node), bailout);
4634       }
4635     }
4636 
4637     if (bailout->req() > 1) {
4638       PreserveJVMState pjvms(this);
4639       set_control(_gvn.transform(bailout));
4640       uncommon_trap(Deoptimization::Reason_intrinsic,
4641                     Deoptimization::Action_maybe_recompile);
4642     }
4643 
4644     if (!stopped()) {
4645       // How many elements will we copy from the original?
4646       // The answer is MinI(orig_length - start, length).
4647       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4648       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4649 
4650       // Generate a direct call to the right arraycopy function(s).
4651       // We know the copy is disjoint but we might not know if the
4652       // oop stores need checking.
4653       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
4654       // This will fail a store-check if x contains any non-nulls.
4655 
4656       // ArrayCopyNode:Ideal may transform the ArrayCopyNode to

4659       // to the copyOf to be validated, including that the copy to the
4660       // new array won't trigger an ArrayStoreException. That subtype
4661       // check can be optimized if we know something on the type of
4662       // the input array from type speculation.
4663       if (_gvn.type(klass_node)->singleton()) {
4664         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4665         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4666 
4667         int test = C->static_subtype_check(superk, subk);
4668         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4669           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4670           if (t_original->speculative_type() != nullptr) {
4671             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4672           }
4673         }
4674       }
4675 
4676       bool validated = false;
4677       // Reason_class_check rather than Reason_intrinsic because we
4678       // want to intrinsify even if this traps.
4679       if (can_validate) {
4680         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4681 
4682         if (not_subtype_ctrl != top()) {
4683           PreserveJVMState pjvms(this);
4684           set_control(not_subtype_ctrl);
4685           uncommon_trap(Deoptimization::Reason_class_check,
4686                         Deoptimization::Action_make_not_entrant);
4687           assert(stopped(), "Should be stopped");
4688         }
4689         validated = true;
4690       }
4691 
4692       if (!stopped()) {
4693         newcopy = new_array(klass_node, length, 0);  // no arguments to push
4694 
4695         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
4696                                                 load_object_klass(original), klass_node);
4697         if (!is_copyOfRange) {
4698           ac->set_copyof(validated);
4699         } else {

4745 
4746 //-----------------------generate_method_call----------------------------
4747 // Use generate_method_call to make a slow-call to the real
4748 // method if the fast path fails.  An alternative would be to
4749 // use a stub like OptoRuntime::slow_arraycopy_Java.
4750 // This only works for expanding the current library call,
4751 // not another intrinsic.  (E.g., don't use this for making an
4752 // arraycopy call inside of the copyOf intrinsic.)
4753 CallJavaNode*
4754 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4755   // When compiling the intrinsic method itself, do not use this technique.
4756   guarantee(callee() != C->method(), "cannot make slow-call to self");
4757 
4758   ciMethod* method = callee();
4759   // ensure the JVMS we have will be correct for this call
4760   guarantee(method_id == method->intrinsic_id(), "must match");
4761 
4762   const TypeFunc* tf = TypeFunc::make(method);
4763   if (res_not_null) {
4764     assert(tf->return_type() == T_OBJECT, "");
4765     const TypeTuple* range = tf->range_cc();
4766     const Type** fields = TypeTuple::fields(range->cnt());
4767     fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4768     const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4769     tf = TypeFunc::make(tf->domain_cc(), new_range);
4770   }
4771   CallJavaNode* slow_call;
4772   if (is_static) {
4773     assert(!is_virtual, "");
4774     slow_call = new CallStaticJavaNode(C, tf,
4775                            SharedRuntime::get_resolve_static_call_stub(), method);
4776   } else if (is_virtual) {
4777     assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4778     int vtable_index = Method::invalid_vtable_index;
4779     if (UseInlineCaches) {
4780       // Suppress the vtable call
4781     } else {
4782       // hashCode and clone are not miranda methods,
4783       // so the vtable index is fixed.
4784       // No need to use the linkResolver to get it.
4785        vtable_index = method->vtable_index();
4786        assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4787               "bad index %d", vtable_index);
4788     }
4789     slow_call = new CallDynamicJavaNode(tf,

4806   set_edges_for_java_call(slow_call);
4807   return slow_call;
4808 }
4809 
4810 
4811 /**
4812  * Build special case code for calls to hashCode on an object. This call may
4813  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4814  * slightly different code.
4815  */
4816 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4817   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4818   assert(!(is_virtual && is_static), "either virtual, special, or static");
4819 
4820   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4821 
4822   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4823   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4824   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4825   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4826   Node* obj = argument(0);
4827 
4828   if (gvn().type(obj)->is_inlinetypeptr()) {
4829     return false;
4830   }
4831 
4832   if (!is_static) {
4833     // Check for hashing null object
4834     obj = null_check_receiver();
4835     if (stopped())  return true;        // unconditionally null
4836     result_reg->init_req(_null_path, top());
4837     result_val->init_req(_null_path, top());
4838   } else {
4839     // Do a null check, and return zero if null.
4840     // System.identityHashCode(null) == 0
4841     Node* null_ctl = top();
4842     obj = null_check_oop(obj, &null_ctl);
4843     result_reg->init_req(_null_path, null_ctl);
4844     result_val->init_req(_null_path, _gvn.intcon(0));
4845   }
4846 
4847   // Unconditionally null?  Then return right away.
4848   if (stopped()) {
4849     set_control( result_reg->in(_null_path));
4850     if (!stopped())
4851       set_result(result_val->in(_null_path));
4852     return true;
4853   }
4854 
4855   // We only go to the fast case code if we pass a number of guards.  The
4856   // paths which do not pass are accumulated in the slow_region.
4857   RegionNode* slow_region = new RegionNode(1);
4858   record_for_igvn(slow_region);
4859 
4860   // If this is a virtual call, we generate a funny guard.  We pull out
4861   // the vtable entry corresponding to hashCode() from the target object.
4862   // If the target method which we are calling happens to be the native
4863   // Object hashCode() method, we pass the guard.  We do not need this
4864   // guard for non-virtual calls -- the caller is known to be the native
4865   // Object hashCode().
4866   if (is_virtual) {
4867     // After null check, get the object's klass.
4868     Node* obj_klass = load_object_klass(obj);
4869     generate_virtual_guard(obj_klass, slow_region);
4870   }
4871 
4872   // Get the header out of the object, use LoadMarkNode when available
4873   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4874   // The control of the load must be null. Otherwise, the load can move before
4875   // the null check after castPP removal.
4876   Node* no_ctrl = nullptr;
4877   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4878 
4879   // Test the header to see if it is unlocked.
4880   // This also serves as guard against inline types
4881   Node *lock_mask      = _gvn.MakeConX(markWord::inline_type_mask_in_place);
4882   Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4883   Node *unlocked_val   = _gvn.MakeConX(markWord::unlocked_value);
4884   Node *chk_unlocked   = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
4885   Node *test_unlocked  = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
4886 
4887   generate_slow_guard(test_unlocked, slow_region);
4888 
4889   // Get the hash value and check to see that it has been properly assigned.
4890   // We depend on hash_mask being at most 32 bits and avoid the use of
4891   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4892   // vm: see markWord.hpp.
4893   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
4894   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
4895   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4896   // This hack lets the hash bits live anywhere in the mark word now, as long
4897   // as the shift drops the relevant bits into the low 32 bits.  Note that
4898   // Java spec says that HashCode is an int so there's no point in capturing
4899   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4900   hshifted_header      = ConvX2I(hshifted_header);
4901   Node *hash_val       = _gvn.transform(new AndINode(hshifted_header, hash_mask));

4927     // this->control() comes from set_results_for_java_call
4928     result_reg->init_req(_slow_path, control());
4929     result_val->init_req(_slow_path, slow_result);
4930     result_io  ->set_req(_slow_path, i_o());
4931     result_mem ->set_req(_slow_path, reset_memory());
4932   }
4933 
4934   // Return the combined state.
4935   set_i_o(        _gvn.transform(result_io)  );
4936   set_all_memory( _gvn.transform(result_mem));
4937 
4938   set_result(result_reg, result_val);
4939   return true;
4940 }
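     // The unlocked fast path above amounts to this (a sketch in C terms;
     // constants as in markWord.hpp, with the "hash not yet assigned" check
     // in code elided here):
     //
     //   uintptr_t mark = obj->mark();
     //   if ((mark & inline_type_mask_in_place) != unlocked_value)  goto slow_path;
     //   jint hash = (jint)(mark >> hash_shift) & hash_mask;  // logical shift
     //   // hash == no_hash (never assigned) also takes slow_path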
4941 
4942 //---------------------------inline_native_getClass----------------------------
4943 // public final native Class<?> java.lang.Object.getClass();
4944 //
4945 // Build special case code for calls to getClass on an object.
4946 bool LibraryCallKit::inline_native_getClass() {
4947   Node* obj = argument(0);
4948   if (obj->is_InlineType()) {
4949     const Type* t = _gvn.type(obj);
4950     if (t->maybe_null()) {
4951       null_check(obj);
4952     }
4953     set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
4954     return true;
4955   }
4956   obj = null_check_receiver();
4957   if (stopped())  return true;
4958   set_result(load_mirror_from_klass(load_object_klass(obj)));
4959   return true;
4960 }
4961 
4962 //-----------------inline_native_Reflection_getCallerClass---------------------
4963 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4964 //
4965 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4966 //
4967 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4968 // in that it must skip particular security frames and checks for
4969 // caller sensitive methods.
4970 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4971 #ifndef PRODUCT
4972   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4973     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4974   }
4975 #endif
4976 

5237     if (C->get_alias_index(src_type) == C->get_alias_index(dst_type)) {
5238       flags |= RC_NARROW_MEM; // narrow in memory
5239     }
5240   }
5241 
5242   // Call it.  Note that the length argument is not scaled.
5243   make_runtime_call(flags,
5244                     OptoRuntime::fast_arraycopy_Type(),
5245                     StubRoutines::unsafe_arraycopy(),
5246                     "unsafe_arraycopy",
5247                     dst_type,
5248                     src_addr, dst_addr, size XTOP);
5249 
5250   store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
5251 
5252   return true;
5253 }
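     // The raw copy above is bracketed by the thread-local "doing unsafe
     // access" flag so that a fault during the copy can be attributed to
     // Unsafe (a sketch; the set-to-true store happens in code elided above):
     //
     //   thread->_doing_unsafe_access = true;   // before the stub call (elided)
     //   StubRoutines::unsafe_arraycopy()(src_addr, dst_addr, size);
     //   thread->_doing_unsafe_access = false;  // the store_to_memory above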
5254 
5255 #undef XTOP
5256 
5257 //----------------------inline_unsafe_isFlattenedArray-------------------
5258 // public native boolean Unsafe.isFlattenedArray(Class<?> arrayClass);
5259 // This intrinsic exploits assumptions made by the native implementation
5260 // (arrayClass is neither null nor primitive) to avoid unnecessary null checks.
5261 bool LibraryCallKit::inline_unsafe_isFlattenedArray() {
5262   Node* cls = argument(1);
5263   Node* p = basic_plus_adr(cls, java_lang_Class::klass_offset());
5264   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p,
5265                                                  TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT));
5266   Node* result = flat_array_test(kls);
5267   set_result(result);
5268   return true;
5269 }
5270 
5271 //------------------------copy_to_clone-----------------------------------
5272 // Helper function for inline_native_clone.
5273 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5274   assert(obj_size != nullptr, "");
5275   Node* raw_obj = alloc_obj->in(1);
5276   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5277 
5278   AllocateNode* alloc = nullptr;
5279   if (ReduceBulkZeroing) {
5280     // We will be completely responsible for initializing this object -
5281     // mark Initialize node as complete.
5282     alloc = AllocateNode::Ideal_allocation(alloc_obj);
5283     // The object was just allocated - there should not be any stores!
5284     guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
5285     // Mark as complete_with_arraycopy so that on AllocateNode
5286     // expansion, we know this AllocateNode is initialized by an array
5287     // copy and a StoreStore barrier exists after the array copy.
5288     alloc->initialization()->set_complete_with_arraycopy();
5289   }
5290 

5315 //  not cloneable or finalizer => slow path to out-of-line Object.clone
5316 //
5317 // The general case has two steps, allocation and copying.
5318 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5319 //
5320 // Copying also has two cases, oop arrays and everything else.
5321 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5322 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5323 //
5324 // These steps fold up nicely if and when the cloned object's klass
5325 // can be sharply typed as an object array, a type array, or an instance.
5326 //
5327 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5328   PhiNode* result_val;
5329 
5330   // Set the reexecute bit for the interpreter to reexecute
5331   // the bytecode that invokes Object.clone if deoptimization happens.
5332   { PreserveReexecuteState preexecs(this);
5333     jvms()->set_should_reexecute(true);
5334 
5335     Node* obj = argument(0);
5336     obj = null_check_receiver();
5337     if (stopped())  return true;
5338 
5339     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5340 
5341     // If we are going to clone an instance, we need its exact type to
5342     // know the number and types of fields to convert the clone to
5343     // loads/stores. Maybe a speculative type can help us.
5344     if (!obj_type->klass_is_exact() &&
5345         obj_type->speculative_type() != nullptr &&
5346         obj_type->speculative_type()->is_instance_klass() &&
5347         !obj_type->speculative_type()->is_inlinetype()) {
5348       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5349       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5350           !spec_ik->has_injected_fields()) {
5351         if (!obj_type->isa_instptr() ||
5352             obj_type->is_instptr()->instance_klass()->has_subklass()) {
5353           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5354         }
5355       }
5356     }
5357 
5358     // Conservatively insert a memory barrier on all memory slices.
5359     // Do not let writes into the original float below the clone.
5360     insert_mem_bar(Op_MemBarCPUOrder);
5361 
5362     // paths into result_reg:
5363     enum {
5364       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
5365       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
5366       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
5367       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
5368       PATH_LIMIT
5369     };
5370     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5371     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5372     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
5373     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5374     record_for_igvn(result_reg);
5375 
5376     Node* obj_klass = load_object_klass(obj);
5377     // We only go to the fast case code if we pass a number of guards.
5378     // The paths which do not pass are accumulated in the slow_region.
5379     RegionNode* slow_region = new RegionNode(1);
5380     record_for_igvn(slow_region);
5381 
5382     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
5383     if (array_ctl != nullptr) {
5384       // It's an array.
5385       PreserveJVMState pjvms(this);
5386       set_control(array_ctl);
5387 
5388       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5389       const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
5390       if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
5391           obj_type->can_be_inline_array() &&
5392           (ary_ptr == nullptr || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
5393         // A flat inline type array may have an object field that would require a
5394         // write barrier. Conservatively, go to the slow path.
5395         generate_fair_guard(flat_array_test(obj_klass), slow_region);
5396       }
5397 
5398       if (!stopped()) {
5399         Node* obj_length = load_array_length(obj);
5400         Node* obj_size  = nullptr;
5401         Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true);
5402 
5403         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5404         if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5405           // If it is an oop array, it requires very special treatment,
5406           // because gc barriers are required when accessing the array.
5407           Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5408           if (is_obja != nullptr) {
5409             PreserveJVMState pjvms2(this);
5410             set_control(is_obja);
5411             // Generate a direct call to the right arraycopy function(s).
5412             // Clones are always tightly coupled.
5413             ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5414             ac->set_clone_oop_array();
5415             Node* n = _gvn.transform(ac);
5416             assert(n == ac, "cannot disappear");
5417             ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5418 
5419             result_reg->init_req(_objArray_path, control());
5420             result_val->init_req(_objArray_path, alloc_obj);
5421             result_i_o ->set_req(_objArray_path, i_o());
5422             result_mem ->set_req(_objArray_path, reset_memory());
5423           }
5424         }
5425         // Otherwise, there are no barriers to worry about.
5426         // (We can dispense with card marks if we know the allocation
5427         //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
5428         //  causes the non-eden paths to take compensating steps to
5429         //  simulate a fresh allocation, so that no further
5430         //  card marks are required in compiled code to initialize
5431         //  the object.)
5432 
5433         if (!stopped()) {
5434           copy_to_clone(obj, alloc_obj, obj_size, true);
5435 
5436           // Present the results of the copy.
5437           result_reg->init_req(_array_path, control());
5438           result_val->init_req(_array_path, alloc_obj);
5439           result_i_o ->set_req(_array_path, i_o());
5440           result_mem ->set_req(_array_path, reset_memory());
5441         }
5442       }
5443     }
5444 
5445     if (!stopped()) {
5446       // It's an instance (we did array above).  Make the slow-path tests.
5447       // If this is a virtual call, we generate a funny guard.  We grab
5448       // the vtable entry corresponding to clone() from the target object.
5449       // If the target method which we are calling happens to be the
5450       // Object clone() method, we pass the guard.  We do not need this
5451       // guard for non-virtual calls; the caller is known to be the native
5452       // Object clone().
5453       if (is_virtual) {
5454         generate_virtual_guard(obj_klass, slow_region);
5455       }
5456 
5457       // The object must be easily cloneable and must not have a finalizer.
5458       // Both of these conditions may be checked in a single test.
5459       // We could optimize the test further, but we don't care.
5460       generate_access_flags_guard(obj_klass,
5461                                   // Test both conditions:
5462                                   JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
5463                                   // Must be cloneable but not finalizer:
5464                                   JVM_ACC_IS_CLONEABLE_FAST,

5594 // deoptimize. This is possible because tightly_coupled_allocation()
5595 // guarantees there's no observer of the allocated array at this point
5596 // and the control flow is simple enough.
5597 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5598                                                     int saved_reexecute_sp, uint new_idx) {
5599   if (saved_jvms_before_guards != nullptr && !stopped()) {
5600     replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5601 
5602     assert(alloc != nullptr, "only with a tightly coupled allocation");
5603     // restore JVM state to the state at the arraycopy
5604     saved_jvms_before_guards->map()->set_control(map()->control());
5605     assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5606     assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5607     // If we've improved the types of some nodes (null check) while
5608     // emitting the guards, propagate them to the current state
5609     map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5610     set_jvms(saved_jvms_before_guards);
5611     _reexecute_sp = saved_reexecute_sp;
5612 
5613     // Remove the allocation from above the guards
5614     CallProjections* callprojs = alloc->extract_projections(true);
5615     InitializeNode* init = alloc->initialization();
5616     Node* alloc_mem = alloc->in(TypeFunc::Memory);
5617     C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5618     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5619 
5620     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5621     // the allocation (i.e. is only valid if the allocation succeeds):
5622     // 1) replace CastIINode with AllocateArrayNode's length here
5623     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5624     //
5625     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
5626     // a new separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy)
5627     Node* init_control = init->proj_out(TypeFunc::Control);
5628     Node* alloc_length = alloc->Ideal_length();
5629 #ifdef ASSERT
5630     Node* prev_cast = nullptr;
5631 #endif
5632     for (uint i = 0; i < init_control->outcnt(); i++) {
5633       Node* init_out = init_control->raw_out(i);
5634       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5635 #ifdef ASSERT
5636         if (prev_cast == nullptr) {
5637           prev_cast = init_out;
5638         } else {
5639           if (prev_cast->cmp(*init_out) == false) {
5640             prev_cast->dump();
5641             init_out->dump();
5642             assert(false, "not equal CastIINode");
5643           }
5644         }
5645 #endif
5646         C->gvn_replace_by(init_out, alloc_length);
5647       }
5648     }
5649     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5650 
5651     // move the allocation here (after the guards)
5652     _gvn.hash_delete(alloc);
5653     alloc->set_req(TypeFunc::Control, control());
5654     alloc->set_req(TypeFunc::I_O, i_o());
5655     Node *mem = reset_memory();
5656     set_all_memory(mem);
5657     alloc->set_req(TypeFunc::Memory, mem);
5658     set_control(init->proj_out_or_null(TypeFunc::Control));
5659     set_i_o(callprojs->fallthrough_ioproj);
5660 
5661     // Update memory as done in GraphKit::set_output_for_allocation()
5662     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5663     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5664     if (ary_type->isa_aryptr() && length_type != nullptr) {
5665       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5666     }
5667     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5668     int            elemidx  = C->get_alias_index(telemref);
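    // Rewire both the raw memory slice (used for the header/zeroing initialization) and
    // the array's element slice to the Initialize projection of the moved allocation.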
5669     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5670     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5671 
5672     Node* allocx = _gvn.transform(alloc);
5673     assert(allocx == alloc, "where has the allocation gone?");
5674     assert(dest->is_CheckCastPP(), "not an allocation result?");
5675 
5676     _gvn.hash_delete(dest);
5677     dest->set_req(0, control());
5678     Node* destx = _gvn.transform(dest);
5679     assert(destx == dest, "where has the allocation result gone?");

5840         top_src  = src_type->isa_aryptr();
5841         has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5842         src_spec = true;
5843       }
5844       if (!has_dest) {
5845         dest = maybe_cast_profiled_obj(dest, dest_k, true);
5846         dest_type  = _gvn.type(dest);
5847         top_dest  = dest_type->isa_aryptr();
5848         has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5849         dest_spec = true;
5850       }
5851     }
5852   }
5853 
5854   if (has_src && has_dest && can_emit_guards) {
5855     BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5856     BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5857     if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5858     if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5859 
5860     if (src_elem == dest_elem && top_src->is_flat() == top_dest->is_flat() && src_elem == T_OBJECT) {
5861       // If both arrays are object arrays then having the exact types
5862       // for both will remove the need for a subtype check at runtime
5863       // before the call and may make it possible to pick a faster copy
5864       // routine (without a subtype check on every element)
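      //
      // For example (hedged illustration): with declared types of Object[],
      //
      //   static void copyAll(Object[] src, Object[] dst) {
      //     System.arraycopy(src, 0, dst, 0, src.length);
      //   }
      //
      // profiling may show that src and dst are always String[]; speculating on those
      // exact types removes the per-element store check from the copy.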
5865       // Do we have the exact type of src?
5866       bool could_have_src = src_spec;
5867       // Do we have the exact type of dest?
5868       bool could_have_dest = dest_spec;
5869       ciKlass* src_k = nullptr;
5870       ciKlass* dest_k = nullptr;
5871       if (!src_spec) {
5872         src_k = src_type->speculative_type_not_null();
5873         if (src_k != nullptr && src_k->is_array_klass()) {
5874           could_have_src = true;
5875         }
5876       }
5877       if (!dest_spec) {
5878         dest_k = dest_type->speculative_type_not_null();
5879         if (dest_k != nullptr && dest_k->is_array_klass()) {
5880           could_have_dest = true;
5881         }
5882       }
5883       if (could_have_src && could_have_dest) {
5884         // If we can have both exact types, emit the missing guards
5885         if (could_have_src && !src_spec) {
5886           src = maybe_cast_profiled_obj(src, src_k, true);
5887           src_type = _gvn.type(src);
5888           top_src = src_type->isa_aryptr();
5889         }
5890         if (could_have_dest && !dest_spec) {
5891           dest = maybe_cast_profiled_obj(dest, dest_k, true);
5892           dest_type = _gvn.type(dest);
5893           top_dest = dest_type->isa_aryptr();
5894         }
5895       }
5896     }
5897   }
5898 
5899   ciMethod* trap_method = method();
5900   int trap_bci = bci();
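  // If a tightly coupled allocation is being moved below the guards, a failing guard
  // re-executes from the allocation (see arraycopy_move_allocation_here()), so count
  // potential traps against the allocation's method and bci instead.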
5901   if (saved_jvms_before_guards != nullptr) {
5902     trap_method = alloc->jvms()->method();
5903     trap_bci = alloc->jvms()->bci();
5904   }
5905 
5906   bool negative_length_guard_generated = false;
5907 
5908   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5909       can_emit_guards && !src->is_top() && !dest->is_top()) {
5910     // validate arguments: enables transformation of the ArrayCopyNode
5911     validated = true;
5912 
5913     RegionNode* slow_region = new RegionNode(1);
5914     record_for_igvn(slow_region);
5915 
5916     // (1) src and dest are arrays.
5917     generate_non_array_guard(load_object_klass(src), slow_region);
5918     generate_non_array_guard(load_object_klass(dest), slow_region);
5919 
5920     // (2) src and dest arrays must have elements of the same BasicType
5921     // done at macro expansion or at Ideal transformation time
5922 
5923     // (4) src_offset must not be negative.
5924     generate_negative_guard(src_offset, slow_region);
5925 
5926     // (5) dest_offset must not be negative.
5927     generate_negative_guard(dest_offset, slow_region);
5928 
5929     // (7) src_offset + length must not exceed length of src.
5930     generate_limit_guard(src_offset, length,
5931                          load_array_length(src),
5932                          slow_region);
5933 
5934     // (8) dest_offset + length must not exceed length of dest.
5935     generate_limit_guard(dest_offset, length,
5936                          load_array_length(dest),
5937                          slow_region);
5938 
5939     // (6) length must not be negative.
5940     // This is also checked in generate_arraycopy() during macro expansion, but
5941     // we have to check it here as well in case the ArrayCopyNode is
5942     // eliminated by Escape Analysis.
5943     if (EliminateAllocations) {
5944       generate_negative_guard(length, slow_region);
5945       negative_length_guard_generated = true;
5946     }
5947 
5948     // (9) each element of an oop array must be assignable
5949     Node* dest_klass = load_object_klass(dest);
5950     if (src != dest) {
5951       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
5952       slow_region->add_req(not_subtype_ctrl);
5953     }
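    //
    // Taken together, guards (1)-(9) mirror the checks System.arraycopy specifies before
    // any element is copied. An approximate Java-level rendering of the slow-path
    // condition (illustrative only; guard (9) is really an array-klass subtype check,
    // and the limit guards are overflow-safe):
    //
    //   static boolean needsSlowPath(Object src, int srcPos, Object dst, int dstPos, int len) {
    //     Class<?> sk = src.getClass(), dk = dst.getClass();
    //     return !sk.isArray() || !dk.isArray()
    //         || srcPos < 0 || dstPos < 0 || len < 0
    //         || srcPos + len > java.lang.reflect.Array.getLength(src)
    //         || dstPos + len > java.lang.reflect.Array.getLength(dst)
    //         || !dk.getComponentType().isAssignableFrom(sk.getComponentType());
    //   }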
5954 
5955     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5956     const Type* toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
5957     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5958     src_type = _gvn.type(src);
5959     top_src  = src_type->isa_aryptr();
5960 
5961     // Handle flat inline type arrays (null-free arrays are handled by the subtype check above)
5962     if (!stopped() && UseFlatArray) {
5963       // If dest is flat, src must be flat as well (guaranteed by src <: dest check). Handle flat src here.
5964       assert(top_dest == nullptr || !top_dest->is_flat() || top_src->is_flat(), "src array must be flat");
5965       if (top_src != nullptr && top_src->is_flat()) {
5966         // Src is flat, check that dest is flat as well
5967         if (top_dest != nullptr && !top_dest->is_flat()) {
5968           generate_fair_guard(flat_array_test(dest_klass, /* flat = */ false), slow_region);
5969           // Since dest is flat and src <: dest, dest must have the same type as src.
5970           top_dest = top_src->cast_to_exactness(false);
5971           assert(top_dest->is_flat(), "dest must be flat");
5972           dest = _gvn.transform(new CheckCastPPNode(control(), dest, top_dest));
5973         }
5974       } else if (top_src == nullptr || !top_src->is_not_flat()) {
5975         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
5976         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
5977         assert(top_dest == nullptr || !top_dest->is_flat(), "dest array must not be flat");
5978         generate_fair_guard(flat_array_test(src), slow_region);
5979         if (top_src != nullptr) {
5980           top_src = top_src->cast_to_not_flat();
5981           src = _gvn.transform(new CheckCastPPNode(control(), src, top_src));
5982         }
5983       }
5984     }
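    //
    // Illustrative Valhalla shape (the value-class syntax below is only a sketch):
    // copying from a possibly flat array into a non-flat Object[] must take the slow
    // path, because flat elements have to be buffered as heap objects on the way out:
    //
    //   value class Point { int x = 0; int y = 0; }
    //   Point[] pts = makePoints();                   // statically Point[], may be flat
    //   Object[] dst = new Object[pts.length];        // never flat
    //   System.arraycopy(pts, 0, dst, 0, pts.length); // slow path if pts is flat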
5985 
5986     {
5987       PreserveJVMState pjvms(this);
5988       set_control(_gvn.transform(slow_region));
5989       uncommon_trap(Deoptimization::Reason_intrinsic,
5990                     Deoptimization::Action_make_not_entrant);
5991       assert(stopped(), "Should be stopped");
5992     }
5993     arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
5994   }
5995 
5996   if (stopped()) {
5997     return true;
5998   }
5999 
6000   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6001                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
6002                                           // so the compiler has a chance to eliminate them: during macro expansion,
6003                                           // we have to set their control (CastPP nodes are eliminated).
6004                                           load_object_klass(src), load_object_klass(dest),
6005                                           load_array_length(src), load_array_length(dest));
6006 
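  // Mark the node as a full arraycopy; "validated" records whether the guards above were
  // emitted, so later transformations can rely on the arguments having been checked.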
6007   ac->set_arraycopy(validated);
6008 
6009   Node* n = _gvn.transform(ac);
6010   if (n == ac) {
6011     ac->connect_outputs(this);
6012   } else {