< prev index next >

src/hotspot/share/opto/library_call.cpp

Print this page

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/macroAssembler.hpp"



  26 #include "ci/ciSymbols.hpp"
  27 #include "ci/ciUtilities.inline.hpp"
  28 #include "classfile/vmIntrinsics.hpp"
  29 #include "compiler/compileBroker.hpp"
  30 #include "compiler/compileLog.hpp"
  31 #include "gc/shared/barrierSet.hpp"

  32 #include "jfr/support/jfrIntrinsics.hpp"
  33 #include "memory/resourceArea.hpp"

  34 #include "oops/klass.inline.hpp"

  35 #include "oops/objArrayKlass.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/c2compiler.hpp"
  39 #include "opto/castnode.hpp"
  40 #include "opto/cfgnode.hpp"
  41 #include "opto/convertnode.hpp"
  42 #include "opto/countbitsnode.hpp"

  43 #include "opto/idealKit.hpp"

  44 #include "opto/library_call.hpp"
  45 #include "opto/mathexactnode.hpp"
  46 #include "opto/mulnode.hpp"
  47 #include "opto/narrowptrnode.hpp"
  48 #include "opto/opaquenode.hpp"

  49 #include "opto/parse.hpp"
  50 #include "opto/rootnode.hpp"
  51 #include "opto/runtime.hpp"
  52 #include "opto/subnode.hpp"

  53 #include "opto/vectornode.hpp"
  54 #include "prims/jvmtiExport.hpp"
  55 #include "prims/jvmtiThreadState.hpp"
  56 #include "prims/unsafe.hpp"

  57 #include "runtime/jniHandles.inline.hpp"
  58 #include "runtime/mountUnmountDisabler.hpp"
  59 #include "runtime/objectMonitor.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/stubRoutines.hpp"

  62 #include "utilities/macros.hpp"
  63 #include "utilities/powerOfTwo.hpp"
  64 
  65 //---------------------------make_vm_intrinsic----------------------------
  66 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  67   vmIntrinsicID id = m->intrinsic_id();
  68   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
  69 
  70   if (!m->is_loaded()) {
  71     // Do not attempt to inline unloaded methods.
  72     return nullptr;
  73   }
  74 
  75   C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
  76   bool is_available = false;
  77 
  78   {
  79     // For calling is_intrinsic_supported and is_intrinsic_disabled_by_flag
  80     // the compiler must transition to '_thread_in_vm' state because both
  81     // methods access VM-internal data.

 392   case vmIntrinsics::_getReferenceOpaque:       return inline_unsafe_access(!is_store, T_OBJECT,   Opaque, false);
 393   case vmIntrinsics::_getBooleanOpaque:         return inline_unsafe_access(!is_store, T_BOOLEAN,  Opaque, false);
 394   case vmIntrinsics::_getByteOpaque:            return inline_unsafe_access(!is_store, T_BYTE,     Opaque, false);
 395   case vmIntrinsics::_getShortOpaque:           return inline_unsafe_access(!is_store, T_SHORT,    Opaque, false);
 396   case vmIntrinsics::_getCharOpaque:            return inline_unsafe_access(!is_store, T_CHAR,     Opaque, false);
 397   case vmIntrinsics::_getIntOpaque:             return inline_unsafe_access(!is_store, T_INT,      Opaque, false);
 398   case vmIntrinsics::_getLongOpaque:            return inline_unsafe_access(!is_store, T_LONG,     Opaque, false);
 399   case vmIntrinsics::_getFloatOpaque:           return inline_unsafe_access(!is_store, T_FLOAT,    Opaque, false);
 400   case vmIntrinsics::_getDoubleOpaque:          return inline_unsafe_access(!is_store, T_DOUBLE,   Opaque, false);
 401 
 402   case vmIntrinsics::_putReferenceOpaque:       return inline_unsafe_access( is_store, T_OBJECT,   Opaque, false);
 403   case vmIntrinsics::_putBooleanOpaque:         return inline_unsafe_access( is_store, T_BOOLEAN,  Opaque, false);
 404   case vmIntrinsics::_putByteOpaque:            return inline_unsafe_access( is_store, T_BYTE,     Opaque, false);
 405   case vmIntrinsics::_putShortOpaque:           return inline_unsafe_access( is_store, T_SHORT,    Opaque, false);
 406   case vmIntrinsics::_putCharOpaque:            return inline_unsafe_access( is_store, T_CHAR,     Opaque, false);
 407   case vmIntrinsics::_putIntOpaque:             return inline_unsafe_access( is_store, T_INT,      Opaque, false);
 408   case vmIntrinsics::_putLongOpaque:            return inline_unsafe_access( is_store, T_LONG,     Opaque, false);
 409   case vmIntrinsics::_putFloatOpaque:           return inline_unsafe_access( is_store, T_FLOAT,    Opaque, false);
 410   case vmIntrinsics::_putDoubleOpaque:          return inline_unsafe_access( is_store, T_DOUBLE,   Opaque, false);
 411 



 412   case vmIntrinsics::_compareAndSetReference:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap,      Volatile);
 413   case vmIntrinsics::_compareAndSetByte:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap,      Volatile);
 414   case vmIntrinsics::_compareAndSetShort:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap,      Volatile);
 415   case vmIntrinsics::_compareAndSetInt:         return inline_unsafe_load_store(T_INT,    LS_cmp_swap,      Volatile);
 416   case vmIntrinsics::_compareAndSetLong:        return inline_unsafe_load_store(T_LONG,   LS_cmp_swap,      Volatile);
 417 
 418   case vmIntrinsics::_weakCompareAndSetReferencePlain:     return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Relaxed);
 419   case vmIntrinsics::_weakCompareAndSetReferenceAcquire:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Acquire);
 420   case vmIntrinsics::_weakCompareAndSetReferenceRelease:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Release);
 421   case vmIntrinsics::_weakCompareAndSetReference:          return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Volatile);
 422   case vmIntrinsics::_weakCompareAndSetBytePlain:          return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Relaxed);
 423   case vmIntrinsics::_weakCompareAndSetByteAcquire:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Acquire);
 424   case vmIntrinsics::_weakCompareAndSetByteRelease:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Release);
 425   case vmIntrinsics::_weakCompareAndSetByte:               return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Volatile);
 426   case vmIntrinsics::_weakCompareAndSetShortPlain:         return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Relaxed);
 427   case vmIntrinsics::_weakCompareAndSetShortAcquire:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Acquire);
 428   case vmIntrinsics::_weakCompareAndSetShortRelease:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Release);
 429   case vmIntrinsics::_weakCompareAndSetShort:              return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Volatile);
 430   case vmIntrinsics::_weakCompareAndSetIntPlain:           return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Relaxed);
 431   case vmIntrinsics::_weakCompareAndSetIntAcquire:         return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Acquire);

 451   case vmIntrinsics::_compareAndExchangeLong:              return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Volatile);
 452   case vmIntrinsics::_compareAndExchangeLongAcquire:       return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Acquire);
 453   case vmIntrinsics::_compareAndExchangeLongRelease:       return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Release);
 454 
 455   case vmIntrinsics::_getAndAddByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_add,       Volatile);
 456   case vmIntrinsics::_getAndAddShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_add,       Volatile);
 457   case vmIntrinsics::_getAndAddInt:                     return inline_unsafe_load_store(T_INT,    LS_get_add,       Volatile);
 458   case vmIntrinsics::_getAndAddLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_add,       Volatile);
 459 
 460   case vmIntrinsics::_getAndSetByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_set,       Volatile);
 461   case vmIntrinsics::_getAndSetShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_set,       Volatile);
 462   case vmIntrinsics::_getAndSetInt:                     return inline_unsafe_load_store(T_INT,    LS_get_set,       Volatile);
 463   case vmIntrinsics::_getAndSetLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_set,       Volatile);
 464   case vmIntrinsics::_getAndSetReference:               return inline_unsafe_load_store(T_OBJECT, LS_get_set,       Volatile);
 465 
 466   case vmIntrinsics::_loadFence:
 467   case vmIntrinsics::_storeFence:
 468   case vmIntrinsics::_storeStoreFence:
 469   case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
 470 





 471   case vmIntrinsics::_onSpinWait:               return inline_onspinwait();
 472 
 473   case vmIntrinsics::_currentCarrierThread:     return inline_native_currentCarrierThread();
 474   case vmIntrinsics::_currentThread:            return inline_native_currentThread();
 475   case vmIntrinsics::_setCurrentThread:         return inline_native_setCurrentThread();
 476 
 477   case vmIntrinsics::_scopedValueCache:          return inline_native_scopedValueCache();
 478   case vmIntrinsics::_setScopedValueCache:       return inline_native_setScopedValueCache();
 479 
 480   case vmIntrinsics::_Continuation_pin:          return inline_native_Continuation_pinning(false);
 481   case vmIntrinsics::_Continuation_unpin:        return inline_native_Continuation_pinning(true);
 482 
 483   case vmIntrinsics::_vthreadEndFirstTransition:    return inline_native_vthread_end_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_end_first_transition_Java()),
 484                                                                                                 "endFirstTransition", true);
 485   case vmIntrinsics::_vthreadStartFinalTransition:  return inline_native_vthread_start_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_start_final_transition_Java()),
 486                                                                                                   "startFinalTransition", true);
 487   case vmIntrinsics::_vthreadStartTransition:       return inline_native_vthread_start_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_start_transition_Java()),
 488                                                                                                   "startTransition", false);
 489   case vmIntrinsics::_vthreadEndTransition:         return inline_native_vthread_end_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_end_transition_Java()),
 490                                                                                                 "endTransition", false);

 499 #endif
 500   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 501   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 502   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 503   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 504   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 505   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 506   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 507   case vmIntrinsics::_setMemory:                return inline_unsafe_setMemory();
 508   case vmIntrinsics::_getLength:                return inline_native_getLength();
 509   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 510   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 511   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 512   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 513   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 514   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 515   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 516 
 517   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 518   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);






 519 
 520   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 521 
 522   case vmIntrinsics::_isInstance:
 523   case vmIntrinsics::_isHidden:
 524   case vmIntrinsics::_getSuperclass:            return inline_native_Class_query(intrinsic_id());
 525 
 526   case vmIntrinsics::_floatToRawIntBits:
 527   case vmIntrinsics::_floatToIntBits:
 528   case vmIntrinsics::_intBitsToFloat:
 529   case vmIntrinsics::_doubleToRawLongBits:
 530   case vmIntrinsics::_doubleToLongBits:
 531   case vmIntrinsics::_longBitsToDouble:
 532   case vmIntrinsics::_floatToFloat16:
 533   case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());
 534   case vmIntrinsics::_sqrt_float16:             return inline_fp16_operations(intrinsic_id(), 1);
 535   case vmIntrinsics::_fma_float16:              return inline_fp16_operations(intrinsic_id(), 3);
 536   case vmIntrinsics::_floatIsFinite:
 537   case vmIntrinsics::_floatIsInfinite:
 538   case vmIntrinsics::_doubleIsFinite:

2321     case vmIntrinsics::_remainderUnsigned_l: {
2322       zero_check_long(argument(2));
2323       // Compile-time detect of null-exception
2324       if (stopped()) {
2325         return true; // keep the graph constructed so far
2326       }
2327       n = new UModLNode(control(), argument(0), argument(2));
2328       break;
2329     }
2330     default:  fatal_unexpected_iid(id);  break;
2331   }
2332   set_result(_gvn.transform(n));
2333   return true;
2334 }
2335 
2336 //----------------------------inline_unsafe_access----------------------------
2337 
2338 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2339   // Attempt to infer a sharper value type from the offset and base type.
2340   ciKlass* sharpened_klass = nullptr;

2341 
2342   // See if it is an instance field, with an object type.
2343   if (alias_type->field() != nullptr) {
2344     if (alias_type->field()->type()->is_klass()) {
2345       sharpened_klass = alias_type->field()->type()->as_klass();

2346     }
2347   }
2348 
2349   const TypeOopPtr* result = nullptr;
2350   // See if it is a narrow oop array.
2351   if (adr_type->isa_aryptr()) {
2352     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2353       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();

2354       if (elem_type != nullptr && elem_type->is_loaded()) {
2355         // Sharpen the value type.
2356         result = elem_type;
2357       }
2358     }
2359   }
2360 
2361   // The sharpened class might be unloaded if there is no class loader
2362   // contraint in place.
2363   if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2364     // Sharpen the value type.
2365     result = TypeOopPtr::make_from_klass(sharpened_klass);



2366   }
2367   if (result != nullptr) {
2368 #ifndef PRODUCT
2369     if (C->print_intrinsics() || C->print_inlining()) {
2370       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2371       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2372     }
2373 #endif
2374   }
2375   return result;
2376 }
2377 
2378 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2379   switch (kind) {
2380       case Relaxed:
2381         return MO_UNORDERED;
2382       case Opaque:
2383         return MO_RELAXED;
2384       case Acquire:
2385         return MO_ACQUIRE;

2474 #endif // ASSERT
2475  }
2476 #endif //PRODUCT
2477 
2478   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2479 
2480   Node* receiver = argument(0);  // type: oop
2481 
2482   // Build address expression.
2483   Node* heap_base_oop = top();
2484 
2485   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2486   Node* base = argument(1);  // type: oop
2487   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2488   Node* offset = argument(2);  // type: long
2489   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2490   // to be plain byte offsets, which are also the same as those accepted
2491   // by oopDesc::field_addr.
2492   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2493          "fieldOffset must be byte-scaled");







































2494   // 32-bit machines ignore the high half!
2495   offset = ConvL2X(offset);
2496 
2497   // Save state and restore on bailout
2498   SavedState old_state(this);
2499 
2500   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2501   assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2502 
2503   if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2504     if (type != T_OBJECT) {
2505       decorators |= IN_NATIVE; // off-heap primitive access
2506     } else {
2507       return false; // off-heap oop accesses are not supported
2508     }
2509   } else {
2510     heap_base_oop = base; // on-heap or mixed access
2511   }
2512 
2513   // Can base be null? Otherwise, always on-heap access.

2517     decorators |= IN_HEAP;
2518   }
2519 
2520   Node* val = is_store ? argument(4) : nullptr;
2521 
2522   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2523   if (adr_type == TypePtr::NULL_PTR) {
2524     return false; // off-heap access with zero address
2525   }
2526 
2527   // Try to categorize the address.
2528   Compile::AliasType* alias_type = C->alias_type(adr_type);
2529   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2530 
2531   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2532       alias_type->adr_type() == TypeAryPtr::RANGE) {
2533     return false; // not supported
2534   }
2535 
2536   bool mismatched = false;
2537   BasicType bt = alias_type->basic_type();




























2538   if (bt != T_ILLEGAL) {
2539     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2540     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2541       // Alias type doesn't differentiate between byte[] and boolean[]).
2542       // Use address type to get the element type.
2543       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2544     }
2545     if (is_reference_type(bt, true)) {
2546       // accessing an array field with getReference is not a mismatch
2547       bt = T_OBJECT;
2548     }
2549     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2550       // Don't intrinsify mismatched object accesses
2551       return false;
2552     }
2553     mismatched = (bt != type);
2554   } else if (alias_type->adr_type()->isa_oopptr()) {
2555     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2556   }
2557 
2558   old_state.discard();
2559   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2560 
2561   if (mismatched) {
2562     decorators |= C2_MISMATCHED;
2563   }
2564 
2565   // First guess at the value type.
2566   const Type *value_type = Type::get_const_basic_type(type);
2567 
2568   // Figure out the memory ordering.
2569   decorators |= mo_decorator_for_access_kind(kind);
2570 
2571   if (!is_store && type == T_OBJECT) {
2572     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2573     if (tjp != nullptr) {
2574       value_type = tjp;


2575     }
2576   }
2577 
2578   receiver = null_check(receiver);
2579   if (stopped()) {
2580     return true;
2581   }
2582   // Heap pointers get a null-check from the interpreter,
2583   // as a courtesy.  However, this is not guaranteed by Unsafe,
2584   // and it is not possible to fully distinguish unintended nulls
2585   // from intended ones in this API.
2586 
2587   if (!is_store) {
2588     Node* p = nullptr;
2589     // Try to constant fold a load from a constant field
2590     ciField* field = alias_type->field();
2591     if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) {
2592       // final or stable field
2593       p = make_constant_from_field(field, heap_base_oop);
2594     }
2595 
2596     if (p == nullptr) { // Could not constant fold the load
2597       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);





2598       // Normalize the value returned by getBoolean in the following cases
2599       if (type == T_BOOLEAN &&
2600           (mismatched ||
2601            heap_base_oop == top() ||                  // - heap_base_oop is null or
2602            (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2603                                                       //   and the unsafe access is made to large offset
2604                                                       //   (i.e., larger than the maximum offset necessary for any
2605                                                       //   field access)
2606             ) {
2607           IdealKit ideal = IdealKit(this);
2608 #define __ ideal.
2609           IdealVariable normalized_result(ideal);
2610           __ declarations_done();
2611           __ set(normalized_result, p);
2612           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2613           __ set(normalized_result, ideal.ConI(1));
2614           ideal.end_if();
2615           final_sync(ideal);
2616           p = __ value(normalized_result);
2617 #undef __

2621       p = gvn().transform(new CastP2XNode(nullptr, p));
2622       p = ConvX2UL(p);
2623     }
2624     // The load node has the control of the preceding MemBarCPUOrder.  All
2625     // following nodes will have the control of the MemBarCPUOrder inserted at
2626     // the end of this method.  So, pushing the load onto the stack at a later
2627     // point is fine.
2628     set_result(p);
2629   } else {
2630     if (bt == T_ADDRESS) {
2631       // Repackage the long as a pointer.
2632       val = ConvL2X(val);
2633       val = gvn().transform(new CastX2PNode(val));
2634     }
2635     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2636   }
2637 
2638   return true;
2639 }
2640 









































































































































































2641 //----------------------------inline_unsafe_load_store----------------------------
2642 // This method serves a couple of different customers (depending on LoadStoreKind):
2643 //
2644 // LS_cmp_swap:
2645 //
2646 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2647 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2648 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2649 //
2650 // LS_cmp_swap_weak:
2651 //
2652 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2653 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2654 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2655 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2656 //
2657 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);
2658 //   boolean weakCompareAndSetIntPlain(     Object o, long offset, int    expected, int    x);
2659 //   boolean weakCompareAndSetIntAcquire(   Object o, long offset, int    expected, int    x);
2660 //   boolean weakCompareAndSetIntRelease(   Object o, long offset, int    expected, int    x);

2823     }
2824     case LS_cmp_swap:
2825     case LS_cmp_swap_weak:
2826     case LS_get_add:
2827       break;
2828     default:
2829       ShouldNotReachHere();
2830   }
2831 
2832   // Null check receiver.
2833   receiver = null_check(receiver);
2834   if (stopped()) {
2835     return true;
2836   }
2837 
2838   int alias_idx = C->get_alias_index(adr_type);
2839 
2840   if (is_reference_type(type)) {
2841     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2842 













2843     // Transformation of a value which could be null pointer (CastPP #null)
2844     // could be delayed during Parse (for example, in adjust_map_after_if()).
2845     // Execute transformation here to avoid barrier generation in such case.
2846     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2847       newval = _gvn.makecon(TypePtr::NULL_PTR);
2848 
2849     if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2850       // Refine the value to a null constant, when it is known to be null
2851       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2852     }
2853   }
2854 
2855   Node* result = nullptr;
2856   switch (kind) {
2857     case LS_cmp_exchange: {
2858       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2859                                             oldval, newval, value_type, type, decorators);
2860       break;
2861     }
2862     case LS_cmp_swap_weak:

2891   insert_mem_bar(Op_MemBarCPUOrder);
2892   switch(id) {
2893     case vmIntrinsics::_loadFence:
2894       insert_mem_bar(Op_LoadFence);
2895       return true;
2896     case vmIntrinsics::_storeFence:
2897       insert_mem_bar(Op_StoreFence);
2898       return true;
2899     case vmIntrinsics::_storeStoreFence:
2900       insert_mem_bar(Op_StoreStoreFence);
2901       return true;
2902     case vmIntrinsics::_fullFence:
2903       insert_mem_bar(Op_MemBarVolatile);
2904       return true;
2905     default:
2906       fatal_unexpected_iid(id);
2907       return false;
2908   }
2909 }
2910 






























































































// Inline Thread.onSpinWait(): emit an OnSpinWait memory-barrier node,
// which the back end may match to a platform spin-wait hint.
bool LibraryCallKit::inline_onspinwait() {
  insert_mem_bar(Op_OnSpinWait);
  return true;
}
2915 
2916 bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
2917   if (!kls->is_Con()) {
2918     return true;
2919   }
2920   const TypeInstKlassPtr* klsptr = kls->bottom_type()->isa_instklassptr();
2921   if (klsptr == nullptr) {
2922     return true;
2923   }
2924   ciInstanceKlass* ik = klsptr->instance_klass();
2925   // don't need a guard for a klass that is already initialized
2926   return !ik->is_initialized();
2927 }
2928 
2929 //----------------------------inline_unsafe_writeback0-------------------------
2930 // public native void Unsafe.writeback0(long address)

3009                     Deoptimization::Action_make_not_entrant);
3010     }
3011     if (stopped()) {
3012       return true;
3013     }
3014 #endif //INCLUDE_JVMTI
3015 
3016   Node* test = nullptr;
3017   if (LibraryCallKit::klass_needs_init_guard(kls)) {
3018     // Note:  The argument might still be an illegal value like
3019     // Serializable.class or Object[].class.   The runtime will handle it.
3020     // But we must make an explicit check for initialization.
3021     Node* insp = basic_plus_adr(top(), kls, in_bytes(InstanceKlass::init_state_offset()));
3022     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3023     // can generate code to load it as unsigned byte.
3024     Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
3025     Node* bits = intcon(InstanceKlass::fully_initialized);
3026     test = _gvn.transform(new SubINode(inst, bits));
3027     // The 'test' is non-zero if we need to take a slow path.
3028   }
3029 
3030   Node* obj = new_instance(kls, test);





3031   set_result(obj);
3032   return true;
3033 }
3034 
3035 //------------------------inline_native_time_funcs--------------
3036 // inline code for System.currentTimeMillis() and System.nanoTime()
3037 // these have the same type and signature
3038 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3039   const TypeFunc* tf = OptoRuntime::void_long_Type();
3040   const TypePtr* no_memory_effects = nullptr;
3041   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3042   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3043 #ifdef ASSERT
3044   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3045   assert(value_top == top(), "second value must be top");
3046 #endif
3047   set_result(value);
3048   return true;
3049 }
3050 

3825   Node* thread = _gvn.transform(new ThreadLocalNode());
3826   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3827   Node* thread_obj_handle
3828     = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3829   thread_obj_handle = _gvn.transform(thread_obj_handle);
3830   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3831   access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3832 
3833   // Change the _monitor_owner_id of the JavaThread
3834   Node* tid = load_field_from_object(arr, "tid", "J");
3835   Node* monitor_owner_id_offset = basic_plus_adr(top(), thread, in_bytes(JavaThread::monitor_owner_id_offset()));
3836   store_to_memory(control(), monitor_owner_id_offset, tid, T_LONG, MemNode::unordered, true);
3837 
3838   JFR_ONLY(extend_setCurrentThread(thread, arr);)
3839   return true;
3840 }
3841 
3842 const Type* LibraryCallKit::scopedValueCache_type() {
3843   ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3844   const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3845   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3846 
3847   // Because we create the scopedValue cache lazily we have to make the
3848   // type of the result BotPTR.
3849   bool xk = etype->klass_is_exact();
3850   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3851   return objects_type;
3852 }
3853 
3854 Node* LibraryCallKit::scopedValueCache_helper() {
3855   Node* thread = _gvn.transform(new ThreadLocalNode());
3856   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3857   // We cannot use immutable_memory() because we might flip onto a
3858   // different carrier thread, at which point we'll need to use that
3859   // carrier thread's cache.
3860   // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3861   //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3862   return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3863 }
3864 
3865 //------------------------inline_native_scopedValueCache------------------
3866 bool LibraryCallKit::inline_native_scopedValueCache() {
3867   Node* cache_obj_handle = scopedValueCache_helper();
3868   const Type* objects_type = scopedValueCache_type();
3869   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3870 

4006   }
4007   return kls;
4008 }
4009 
4010 //--------------------(inline_native_Class_query helpers)---------------------
4011 // Use this for JVM_ACC_INTERFACE.
4012 // Fall through if (mods & mask) == bits, take the guard otherwise.
4013 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
4014                                                  ByteSize offset, const Type* type, BasicType bt) {
4015   // Branch around if the given klass has the given modifier bit set.
4016   // Like generate_guard, adds a new path onto the region.
4017   Node* modp = basic_plus_adr(top(), kls, in_bytes(offset));
4018   Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
4019   Node* mask = intcon(modifier_mask);
4020   Node* bits = intcon(modifier_bits);
4021   Node* mbit = _gvn.transform(new AndINode(mods, mask));
4022   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
4023   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
4024   return generate_fair_guard(bol, region);
4025 }

4026 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
4027   return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
4028                                     InstanceKlass::access_flags_offset(), TypeInt::CHAR, T_CHAR);
4029 }
4030 
4031 // Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
4032 Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
4033   return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
4034                                     Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
4035 }
4036 
4037 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
4038   return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
4039 }
4040 
4041 //-------------------------inline_native_Class_query-------------------
4042 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
4043   const Type* return_type = TypeInt::BOOL;
4044   Node* prim_return_value = top();  // what happens if it's a primitive class?
4045   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);

4131 
4132 
4133   case vmIntrinsics::_getSuperclass:
4134     // The rules here are somewhat unfortunate, but we can still do better
4135     // with random logic than with a JNI call.
4136     // Interfaces store null or Object as _super, but must report null.
4137     // Arrays store an intermediate super as _super, but must report Object.
4138     // Other types can report the actual _super.
4139     // (To verify this code sequence, check the asserts in JVM_IsInterface.)
4140     if (generate_array_guard(kls, region) != nullptr) {
4141       // A guard was added.  If the guard is taken, it was an array.
4142       phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
4143     }
4144     // Check for interface after array since this checks AccessFlags offset into InstanceKlass.
4145     // In other words, we are accessing subtype-specific information, so we need to determine the subtype first.
4146     if (generate_interface_guard(kls, region) != nullptr) {
4147       // A guard was added.  If the guard is taken, it was an interface.
4148       phi->add_req(null());
4149     }
4150     // If we fall through, it's a plain class.  Get its _super.
4151     p = basic_plus_adr(top(), kls, in_bytes(Klass::super_offset()));
4152     kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
4153     null_ctl = top();
4154     kls = null_check_oop(kls, &null_ctl);
4155     if (null_ctl != top()) {
4156       // If the guard is taken, Object.superClass is null (both klass and mirror).
4157       region->add_req(null_ctl);
4158       phi   ->add_req(null());
4159     }
4160     if (!stopped()) {
4161       query_value = load_mirror_from_klass(kls);











4162     }
4163     break;
4164 
4165   default:
4166     fatal_unexpected_iid(id);
4167     break;
4168   }
4169 
4170   // Fall-through is the normal case of a query to a real class.
4171   phi->init_req(1, query_value);
4172   region->init_req(1, control());
4173 
4174   C->set_has_split_ifs(true); // Has chance for split-if optimization
4175   set_result(region, phi);
4176   return true;
4177 }
4178 

//-------------------------inline_Class_cast-------------------
// Intrinsic for java.lang.Class::cast(Object).  Folds the cast away
// entirely when static_subtype_check can decide it at compile time;
// otherwise emits the same dynamic checks as the checkcast bytecode
// (via gen_checkcast), routing primitive mirrors and failing casts to
// an uncommon trap so the interpreter throws the ClassCastException.
// Returns true iff the call was intrinsified.
bool LibraryCallKit::inline_Class_cast() {
  Node* mirror = argument(0); // Class
  Node* obj    = argument(1);
  const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
  if (mirror_con == nullptr) {
    return false;  // dead path (mirror->is_top()).
  }
  if (obj == nullptr || obj->is_top()) {
    return false;  // dead path
  }
  const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();

  // First, see if Class.cast() can be folded statically.
  // java_mirror_type() returns non-null for compile-time Class constants.
  ciType* tm = mirror_con->java_mirror_type();
  if (tm != nullptr && tm->is_klass() &&
      tp != nullptr) {
    if (!tp->is_loaded()) {
      // Don't use intrinsic when class is not loaded.
      return false;
    } else {
      // Both the mirror's klass and the operand's type are known:
      // ask the subtype oracle for a compile-time answer.
      int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());
      if (static_res == Compile::SSC_always_true) {
        // isInstance() is true - fold the code.
        set_result(obj);
        return true;
      } else if (static_res == Compile::SSC_always_false) {
        // Don't use intrinsic, have to throw ClassCastException.
        // If the reference is null, the non-intrinsic bytecode will
        // be optimized appropriately.
        return false;
      }
      // SSC_easy_test / SSC_full_test: fall through to the dynamic checks.
    }
  }

  // Bailout intrinsic and do normal inlining if exception path is frequent.
  if (too_many_traps(Deoptimization::Reason_intrinsic)) {
    return false;
  }

  // Generate dynamic checks.
  // Class.cast() is java implementation of _checkcast bytecode.
  // Do checkcast (Parse::do_checkcast()) optimizations here.

  mirror = null_check(mirror);
  // If mirror is dead, only null-path is taken.
  if (stopped()) {
    return true;
  }

  // Not-subtype or the mirror's klass ptr is null (in case it is a primitive).
  enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
  RegionNode* region = new RegionNode(PATH_LIMIT);
  record_for_igvn(region);

  // Now load the mirror's klass metaobject, and null-check it.
  // If kls is null, we have a primitive mirror and
  // nothing is an instance of a primitive type.
  Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);

  Node* res = top();
  if (!stopped()) {
    Node* bad_type_ctrl = top();
    // Do checkcast optimizations.
    res = gen_checkcast(obj, kls, &bad_type_ctrl);
    region->init_req(_bad_type_path, bad_type_ctrl);
  }
  // Either failure path reachable?  Then deopt there so the interpreter
  // re-executes Class.cast and throws the exception itself.
  if (region->in(_prim_path) != top() ||
      region->in(_bad_type_path) != top()) {
    // Let Interpreter throw ClassCastException.
    PreserveJVMState pjvms(this);
    set_control(_gvn.transform(region));
    uncommon_trap(Deoptimization::Reason_intrinsic,
                  Deoptimization::Action_maybe_recompile);
  }
  if (!stopped()) {
    set_result(res);
  }
  return true;
}
4260 
4261 
//--------------------------inline_native_subtype_check------------------------
// This intrinsic takes the JNI calls out of the heart of
// UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
// Implements Class.isAssignableFrom(superc, subc) as a five-way decision
// tree (see the enum below); the boolean result is a phi over 'region'.
// Always returns true (the call is always intrinsified).
bool LibraryCallKit::inline_native_subtype_check() {
  // Pull both arguments off the stack.
  Node* args[2];                // two java.lang.Class mirrors: superc, subc
  args[0] = argument(0);
  args[1] = argument(1);
  Node* klasses[2];             // corresponding Klasses: superk, subk
  klasses[0] = klasses[1] = top();

  enum {
    // A full decision tree on {superc is prim, subc is prim}:
    _prim_0_path = 1,           // {P,N} => false
                                // {P,P} & superc!=subc => false
    _prim_same_path,            // {P,P} & superc==subc => true
    _prim_1_path,               // {N,P} => false
    _ref_subtype_path,          // {N,N} & subtype check wins => true
    _both_ref_path,             // {N,N} & subtype check loses => false
    PATH_LIMIT
  };

  RegionNode* region = new RegionNode(PATH_LIMIT);
  Node*       phi    = new PhiNode(region, TypeInt::BOOL);
  record_for_igvn(region);

  const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
  const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
  int class_klass_offset = java_lang_Class::klass_offset();

  // First null-check both mirrors and load each mirror's klass metaobject.
  int which_arg;
  for (which_arg = 0; which_arg <= 1; which_arg++) {
    Node* arg = args[which_arg];
    arg = null_check(arg);
    if (stopped())  break;
    args[which_arg] = arg;

    // Load the Klass* stashed in the java.lang.Class mirror.
    Node* p = basic_plus_adr(arg, class_klass_offset);
    Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
    klasses[which_arg] = _gvn.transform(kls);
  }

  // Having loaded both klasses, test each for null.
  // A null Klass* means the mirror denotes a primitive type.
  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
  for (which_arg = 0; which_arg <= 1; which_arg++) {
    Node* kls = klasses[which_arg];
    Node* null_ctl = top();
    kls = null_check_oop(kls, &null_ctl, never_see_null);
    int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
    region->init_req(prim_path, null_ctl);
    if (stopped())  break;
    klasses[which_arg] = kls;
  }

  if (!stopped()) {
    // now we have two reference types, in klasses[0..1]
    Node* subk   = klasses[1];  // the argument to isAssignableFrom
    Node* superk = klasses[0];  // the receiver
    region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
    // now we have a successful reference subtype check
    region->set_req(_ref_subtype_path, control());
  }

  // If both operands are primitive (both klasses null), then
  // we must return true when they are identical primitives.
  // It is convenient to test this after the first null klass check.
  set_control(region->in(_prim_0_path)); // go back to first null check
  if (!stopped()) {
    // Since superc is primitive, make a guard for the superc==subc case.
    Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
    Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
    generate_guard(bol_eq, region, PROB_FAIR);
    if (region->req() == PATH_LIMIT+1) {
      // A guard was added.  If the added guard is taken, superc==subc.
      // Move the new edge into its proper slot and shrink the region back
      // to its nominal arity.
      region->swap_edges(PATH_LIMIT, _prim_same_path);
      region->del_req(PATH_LIMIT);
    }
    region->set_req(_prim_0_path, control()); // Not equal after all.
  }

  // these are the only paths that produce 'true':
  phi->set_req(_prim_same_path,   intcon(1));
  phi->set_req(_ref_subtype_path, intcon(1));

  // pull together the cases:
  // dead paths get top on both region and phi; every remaining live path
  // without an explicit value yields 'false'.
  assert(region->req() == PATH_LIMIT, "sane region");
  for (uint i = 1; i < region->req(); i++) {
    Node* ctl = region->in(i);
    if (ctl == nullptr || ctl == top()) {
      region->set_req(i, top());
      phi   ->set_req(i, top());
    } else if (phi->in(i) == nullptr) {
      phi->set_req(i, intcon(0)); // all other paths produce 'false'
    }
  }

  set_control(_gvn.transform(region));
  set_result(_gvn.transform(phi));
  return true;
}
4363 
//---------------------generate_array_guard_common------------------------
// Emits a guard testing whether 'kls' is an array klass (or specifically
// an object-array klass), adding the failing side as a new path on
// 'region'.  Which condition is tested is selected by the
// obj_array/not_array flags (see the matrix below).  Returns the control
// projection of the taken guard, or null when the layout helper is a
// compile-time constant and the answer needs no branch.  If 'obj' is
// non-null and the array-side control is live, *obj is re-cast to
// TypeAryPtr::BOTTOM, pinned on that control.
Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
                                                  bool obj_array, bool not_array, Node** obj) {

  if (stopped()) {
    return nullptr;
  }

  // If obj_array/not_array==false/false:
  // Branch around if the given klass is in fact an array (either obj or prim).
  // If obj_array/not_array==false/true:
  // Branch around if the given klass is not an array klass of any kind.
  // If obj_array/not_array==true/true:
  // Branch around if the kls is not an oop array (kls is int[], String, etc.)
  // If obj_array/not_array==true/false:
  // Branch around if the kls is an oop array (Object[] or subtype)
  //
  // Like generate_guard, adds a new path onto the region.
  jint  layout_con = 0;
  Node* layout_val = get_layout_helper(kls, layout_con);
  if (layout_val == nullptr) {
    // Layout helper is a compile-time constant (returned in layout_con):
    // the question can be answered statically, without a runtime test.
    bool query = (obj_array
                  ? Klass::layout_helper_is_objArray(layout_con)
                  : Klass::layout_helper_is_array(layout_con));
    if (query == not_array) {
      return nullptr;                       // never a branch
    } else {                             // always a branch
      Node* always_branch = control();
      if (region != nullptr)
        region->add_req(always_branch);
      set_control(top());
      return always_branch;
    }
  }
  // Now test the correct condition.
  jint  nval = (obj_array
                ? (jint)(Klass::_lh_array_tag_type_value
                   <<    Klass::_lh_array_tag_shift)
                : Klass::_lh_neutral_value);
  Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
  BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
  // invert the test if we are looking for a non-array
  if (not_array)  btest = BoolTest(btest).negate();
  Node* bol = _gvn.transform(new BoolNode(cmp, btest));
  Node* ctrl = generate_fair_guard(bol, region);
  // When testing for non-array, the fall-through (current control) is the
  // array side; otherwise the taken guard is.
  Node* is_array_ctrl = not_array ? control() : ctrl;
  if (obj != nullptr && is_array_ctrl != nullptr && is_array_ctrl != top()) {
    // Keep track of the fact that 'obj' is an array to prevent
    // array specific accesses from floating above the guard.
    *obj = _gvn.transform(new CastPPNode(is_array_ctrl, *obj, TypeAryPtr::BOTTOM));
  }
  return ctrl;
}
4417 




































































































































4418 
4419 //-----------------------inline_native_newArray--------------------------
4420 // private static native Object java.lang.reflect.newArray(Class<?> componentType, int length);
4421 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4422 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4423   Node* mirror;
4424   Node* count_val;
4425   if (uninitialized) {
4426     null_check_receiver();
4427     mirror    = argument(1);
4428     count_val = argument(2);
4429   } else {
4430     mirror    = argument(0);
4431     count_val = argument(1);
4432   }
4433 
4434   mirror = null_check(mirror);
4435   // If mirror or obj is dead, only null-path is taken.
4436   if (stopped())  return true;
4437 
4438   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4439   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4440   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);

4458     CallJavaNode* slow_call = nullptr;
4459     if (uninitialized) {
4460       // Generate optimized virtual call (holder class 'Unsafe' is final)
4461       slow_call = generate_method_call(vmIntrinsics::_allocateUninitializedArray, false, false, true);
4462     } else {
4463       slow_call = generate_method_call_static(vmIntrinsics::_newArray, true);
4464     }
4465     Node* slow_result = set_results_for_java_call(slow_call);
4466     // this->control() comes from set_results_for_java_call
4467     result_reg->set_req(_slow_path, control());
4468     result_val->set_req(_slow_path, slow_result);
4469     result_io ->set_req(_slow_path, i_o());
4470     result_mem->set_req(_slow_path, reset_memory());
4471   }
4472 
4473   set_control(normal_ctl);
4474   if (!stopped()) {
4475     // Normal case:  The array type has been cached in the java.lang.Class.
4476     // The following call works fine even if the array type is polymorphic.
4477     // It could be a dynamic mix of int[], boolean[], Object[], etc.



4478     Node* obj = new_array(klass_node, count_val, 0);  // no arguments to push
4479     result_reg->init_req(_normal_path, control());
4480     result_val->init_req(_normal_path, obj);
4481     result_io ->init_req(_normal_path, i_o());
4482     result_mem->init_req(_normal_path, reset_memory());
4483 
4484     if (uninitialized) {
4485       // Mark the allocation so that zeroing is skipped
4486       AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(obj);
4487       alloc->maybe_set_complete(&_gvn);
4488     }
4489   }
4490 
4491   // Return the combined state.
4492   set_i_o(        _gvn.transform(result_io)  );
4493   set_all_memory( _gvn.transform(result_mem));
4494 
4495   C->set_has_split_ifs(true); // Has chance for split-if optimization
4496   set_result(result_reg, result_val);
4497   return true;

4546   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4547   { PreserveReexecuteState preexecs(this);
4548     jvms()->set_should_reexecute(true);
4549 
4550     array_type_mirror = null_check(array_type_mirror);
4551     original          = null_check(original);
4552 
4553     // Check if a null path was taken unconditionally.
4554     if (stopped())  return true;
4555 
4556     Node* orig_length = load_array_length(original);
4557 
4558     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4559     klass_node = null_check(klass_node);
4560 
4561     RegionNode* bailout = new RegionNode(1);
4562     record_for_igvn(bailout);
4563 
4564     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4565     // Bail out if that is so.
4566     Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);















4567     if (not_objArray != nullptr) {
4568       // Improve the klass node's type from the new optimistic assumption:
4569       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4570       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
4571       Node* cast = new CastPPNode(control(), klass_node, akls);
4572       klass_node = _gvn.transform(cast);


4573     }
4574 
4575     // Bail out if either start or end is negative.
4576     generate_negative_guard(start, bailout, &start);
4577     generate_negative_guard(end,   bailout, &end);
4578 
4579     Node* length = end;
4580     if (_gvn.type(start) != TypeInt::ZERO) {
4581       length = _gvn.transform(new SubINode(end, start));
4582     }
4583 
4584     // Bail out if length is negative (i.e., if start > end).
4585     // Without this the new_array would throw
4586     // NegativeArraySizeException but IllegalArgumentException is what
4587     // should be thrown
4588     generate_negative_guard(length, bailout, &length);
4589 







































4590     // Bail out if start is larger than the original length
4591     Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4592     generate_negative_guard(orig_tail, bailout, &orig_tail);
4593 
4594     if (bailout->req() > 1) {
4595       PreserveJVMState pjvms(this);
4596       set_control(_gvn.transform(bailout));
4597       uncommon_trap(Deoptimization::Reason_intrinsic,
4598                     Deoptimization::Action_maybe_recompile);
4599     }
4600 
4601     if (!stopped()) {
4602       // How many elements will we copy from the original?
4603       // The answer is MinI(orig_tail, length).
4604       Node* moved = _gvn.transform(new MinINode(orig_tail, length));
4605 
4606       // Generate a direct call to the right arraycopy function(s).
4607       // We know the copy is disjoint but we might not know if the
4608       // oop stores need checking.
4609       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).

4615       // to the copyOf to be validated, including that the copy to the
4616       // new array won't trigger an ArrayStoreException. That subtype
4617       // check can be optimized if we know something on the type of
4618       // the input array from type speculation.
4619       if (_gvn.type(klass_node)->singleton()) {
4620         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4621         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4622 
4623         int test = C->static_subtype_check(superk, subk);
4624         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4625           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4626           if (t_original->speculative_type() != nullptr) {
4627             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4628           }
4629         }
4630       }
4631 
4632       bool validated = false;
4633       // Reason_class_check rather than Reason_intrinsic because we
4634       // want to intrinsify even if this traps.
4635       if (!too_many_traps(Deoptimization::Reason_class_check)) {
4636         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4637 
4638         if (not_subtype_ctrl != top()) {
4639           PreserveJVMState pjvms(this);
4640           set_control(not_subtype_ctrl);
4641           uncommon_trap(Deoptimization::Reason_class_check,
4642                         Deoptimization::Action_make_not_entrant);
4643           assert(stopped(), "Should be stopped");
4644         }
4645         validated = true;
4646       }
4647 
4648       if (!stopped()) {
4649         newcopy = new_array(klass_node, length, 0);  // no arguments to push
4650 
4651         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
4652                                                 load_object_klass(original), klass_node);
4653         if (!is_copyOfRange) {
4654           ac->set_copyof(validated);
4655         } else {
4656           ac->set_copyofrange(validated);
4657         }
4658         Node* n = _gvn.transform(ac);
4659         if (n == ac) {
4660           ac->connect_outputs(this);
4661         } else {
4662           assert(validated, "shouldn't transform if all arguments not validated");
4663           set_all_memory(n);
4664         }
4665       }
4666     }
4667   } // original reexecute is set back here
4668 
4669   C->set_has_split_ifs(true); // Has chance for split-if optimization

4701 
4702 //-----------------------generate_method_call----------------------------
4703 // Use generate_method_call to make a slow-call to the real
4704 // method if the fast path fails.  An alternative would be to
4705 // use a stub like OptoRuntime::slow_arraycopy_Java.
4706 // This only works for expanding the current library call,
4707 // not another intrinsic.  (E.g., don't use this for making an
4708 // arraycopy call inside of the copyOf intrinsic.)
4709 CallJavaNode*
4710 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4711   // When compiling the intrinsic method itself, do not use this technique.
4712   guarantee(callee() != C->method(), "cannot make slow-call to self");
4713 
4714   ciMethod* method = callee();
4715   // ensure the JVMS we have will be correct for this call
4716   guarantee(method_id == method->intrinsic_id(), "must match");
4717 
4718   const TypeFunc* tf = TypeFunc::make(method);
4719   if (res_not_null) {
4720     assert(tf->return_type() == T_OBJECT, "");
4721     const TypeTuple* range = tf->range();
4722     const Type** fields = TypeTuple::fields(range->cnt());
4723     fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4724     const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4725     tf = TypeFunc::make(tf->domain(), new_range);
4726   }
4727   CallJavaNode* slow_call;
4728   if (is_static) {
4729     assert(!is_virtual, "");
4730     slow_call = new CallStaticJavaNode(C, tf,
4731                            SharedRuntime::get_resolve_static_call_stub(), method);
4732   } else if (is_virtual) {
4733     assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4734     int vtable_index = Method::invalid_vtable_index;
4735     if (UseInlineCaches) {
4736       // Suppress the vtable call
4737     } else {
4738       // hashCode and clone are not a miranda methods,
4739       // so the vtable index is fixed.
4740       // No need to use the linkResolver to get it.
4741        vtable_index = method->vtable_index();
4742        assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4743               "bad index %d", vtable_index);
4744     }
4745     slow_call = new CallDynamicJavaNode(tf,

4762   set_edges_for_java_call(slow_call);
4763   return slow_call;
4764 }
4765 
4766 
4767 /**
4768  * Build special case code for calls to hashCode on an object. This call may
4769  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4770  * slightly different code.
4771  */
4772 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4773   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4774   assert(!(is_virtual && is_static), "either virtual, special, or static");
4775 
4776   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4777 
4778   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4779   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4780   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4781   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4782   Node* obj = nullptr;







4783   if (!is_static) {
4784     // Check for hashing null object
4785     obj = null_check_receiver();
4786     if (stopped())  return true;        // unconditionally null
4787     result_reg->init_req(_null_path, top());
4788     result_val->init_req(_null_path, top());
4789   } else {
4790     // Do a null check, and return zero if null.
4791     // System.identityHashCode(null) == 0
4792     obj = argument(0);
4793     Node* null_ctl = top();
4794     obj = null_check_oop(obj, &null_ctl);
4795     result_reg->init_req(_null_path, null_ctl);
4796     result_val->init_req(_null_path, _gvn.intcon(0));
4797   }
4798 
4799   // Unconditionally null?  Then return right away.
4800   if (stopped()) {
4801     set_control( result_reg->in(_null_path));
4802     if (!stopped())
4803       set_result(result_val->in(_null_path));
4804     return true;
4805   }
4806 
4807   // We only go to the fast case code if we pass a number of guards.  The
4808   // paths which do not pass are accumulated in the slow_region.
4809   RegionNode* slow_region = new RegionNode(1);
4810   record_for_igvn(slow_region);
4811 
4812   // If this is a virtual call, we generate a funny guard.  We pull out
4813   // the vtable entry corresponding to hashCode() from the target object.
4814   // If the target method which we are calling happens to be the native
4815   // Object hashCode() method, we pass the guard.  We do not need this
4816   // guard for non-virtual calls -- the caller is known to be the native
4817   // Object hashCode().
4818   if (is_virtual) {
4819     // After null check, get the object's klass.
4820     Node* obj_klass = load_object_klass(obj);
4821     generate_virtual_guard(obj_klass, slow_region);
4822   }
4823 
4824   // Get the header out of the object, use LoadMarkNode when available
4825   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4826   // The control of the load must be null. Otherwise, the load can move before
4827   // the null check after castPP removal.
4828   Node* no_ctrl = nullptr;
4829   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4830 
4831   if (!UseObjectMonitorTable) {
4832     // Test the header to see if it is safe to read w.r.t. locking.


4833     Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);
4834     Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4835     Node *monitor_val   = _gvn.MakeConX(markWord::monitor_value);
4836     Node *chk_monitor   = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
4837     Node *test_monitor  = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
4838 
4839     generate_slow_guard(test_monitor, slow_region);
4840   }
4841 
4842   // Get the hash value and check to see that it has been properly assigned.
4843   // We depend on hash_mask being at most 32 bits and avoid the use of
4844   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4845   // vm: see markWord.hpp.
4846   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
4847   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
4848   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4849   // This hack lets the hash bits live anywhere in the mark object now, as long
4850   // as the shift drops the relevant bits into the low 32 bits.  Note that
4851   // Java spec says that HashCode is an int so there's no point in capturing
4852   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).

4880     // this->control() comes from set_results_for_java_call
4881     result_reg->init_req(_slow_path, control());
4882     result_val->init_req(_slow_path, slow_result);
4883     result_io  ->set_req(_slow_path, i_o());
4884     result_mem ->set_req(_slow_path, reset_memory());
4885   }
4886 
4887   // Return the combined state.
4888   set_i_o(        _gvn.transform(result_io)  );
4889   set_all_memory( _gvn.transform(result_mem));
4890 
4891   set_result(result_reg, result_val);
4892   return true;
4893 }
4894 
4895 //---------------------------inline_native_getClass----------------------------
4896 // public final native Class<?> java.lang.Object.getClass();
4897 //
4898 // Build special case code for calls to getClass on an object.
4899 bool LibraryCallKit::inline_native_getClass() {
4900   Node* obj = null_check_receiver();









4901   if (stopped())  return true;
4902   set_result(load_mirror_from_klass(load_object_klass(obj)));
4903   return true;
4904 }
4905 
4906 //-----------------inline_native_Reflection_getCallerClass---------------------
4907 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4908 //
4909 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4910 //
4911 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4912 // in that it must skip particular security frames and checks for
4913 // caller sensitive methods.
4914 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4915 #ifndef PRODUCT
4916   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4917     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4918   }
4919 #endif
4920 

5302 //  not cloneable or finalizer => slow path to out-of-line Object.clone
5303 //
5304 // The general case has two steps, allocation and copying.
5305 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5306 //
5307 // Copying also has two cases, oop arrays and everything else.
5308 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5309 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5310 //
5311 // These steps fold up nicely if and when the cloned object's klass
5312 // can be sharply typed as an object array, a type array, or an instance.
5313 //
5314 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5315   PhiNode* result_val;
5316 
5317   // Set the reexecute bit for the interpreter to reexecute
5318   // the bytecode that invokes Object.clone if deoptimization happens.
5319   { PreserveReexecuteState preexecs(this);
5320     jvms()->set_should_reexecute(true);
5321 
5322     Node* obj = null_check_receiver();

5323     if (stopped())  return true;
5324 
5325     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();






5326 
5327     // If we are going to clone an instance, we need its exact type to
5328     // know the number and types of fields to convert the clone to
5329     // loads/stores. Maybe a speculative type can help us.
5330     if (!obj_type->klass_is_exact() &&
5331         obj_type->speculative_type() != nullptr &&
5332         obj_type->speculative_type()->is_instance_klass()) {

5333       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5334       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5335           !spec_ik->has_injected_fields()) {
5336         if (!obj_type->isa_instptr() ||
5337             obj_type->is_instptr()->instance_klass()->has_subklass()) {
5338           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5339         }
5340       }
5341     }
5342 
5343     // Conservatively insert a memory barrier on all memory slices.
5344     // Do not let writes into the original float below the clone.
5345     insert_mem_bar(Op_MemBarCPUOrder);
5346 
5347     // paths into result_reg:
5348     enum {
5349       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
5350       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
5351       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
5352       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
5353       PATH_LIMIT
5354     };
5355     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5356     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5357     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
5358     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5359     record_for_igvn(result_reg);
5360 
5361     Node* obj_klass = load_object_klass(obj);





5362     Node* array_obj = obj;
5363     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr, &array_obj);
5364     if (array_ctl != nullptr) {
5365       // It's an array.
5366       PreserveJVMState pjvms(this);
5367       set_control(array_ctl);
5368       Node* obj_length = load_array_length(array_obj);
5369       Node* array_size = nullptr; // Size of the array without object alignment padding.
5370       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5371 
5372       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5373       if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5374         // If it is an oop array, it requires very special treatment,
5375         // because gc barriers are required when accessing the array.
5376         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5377         if (is_obja != nullptr) {
5378           PreserveJVMState pjvms2(this);
5379           set_control(is_obja);
5380           // Generate a direct call to the right arraycopy function(s).
5381           // Clones are always tightly coupled.
5382           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, array_obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5383           ac->set_clone_oop_array();
5384           Node* n = _gvn.transform(ac);
5385           assert(n == ac, "cannot disappear");
5386           ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5387 
5388           result_reg->init_req(_objArray_path, control());
5389           result_val->init_req(_objArray_path, alloc_obj);
5390           result_i_o ->set_req(_objArray_path, i_o());
5391           result_mem ->set_req(_objArray_path, reset_memory());
5392         }
5393       }
5394       // Otherwise, there are no barriers to worry about.
5395       // (We can dispense with card marks if we know the allocation
5396       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
5397       //  causes the non-eden paths to take compensating steps to
5398       //  simulate a fresh allocation, so that no further
5399       //  card marks are required in compiled code to initialize
5400       //  the object.)
5401 
5402       if (!stopped()) {
5403         copy_to_clone(array_obj, alloc_obj, array_size, true);
5404 
5405         // Present the results of the copy.
5406         result_reg->init_req(_array_path, control());
5407         result_val->init_req(_array_path, alloc_obj);
5408         result_i_o ->set_req(_array_path, i_o());
5409         result_mem ->set_req(_array_path, reset_memory());




































5410       }
5411     }
5412 
5413     // We only go to the instance fast case code if we pass a number of guards.
5414     // The paths which do not pass are accumulated in the slow_region.
5415     RegionNode* slow_region = new RegionNode(1);
5416     record_for_igvn(slow_region);
5417     if (!stopped()) {
5418       // It's an instance (we did array above).  Make the slow-path tests.
5419       // If this is a virtual call, we generate a funny guard.  We grab
5420       // the vtable entry corresponding to clone() from the target object.
5421       // If the target method which we are calling happens to be the
5422       // Object clone() method, we pass the guard.  We do not need this
5423       // guard for non-virtual calls; the caller is known to be the native
5424       // Object clone().
5425       if (is_virtual) {
5426         generate_virtual_guard(obj_klass, slow_region);
5427       }
5428 
5429       // The object must be easily cloneable and must not have a finalizer.
5430       // Both of these conditions may be checked in a single test.
5431       // We could optimize the test further, but we don't care.
5432       generate_misc_flags_guard(obj_klass,
5433                                 // Test both conditions:
5434                                 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
5435                                 // Must be cloneable but not finalizer:
5436                                 KlassFlags::_misc_is_cloneable_fast,

5528         set_jvms(sfpt->jvms());
5529         _reexecute_sp = jvms()->sp();
5530 
5531         return saved_jvms;
5532       }
5533     }
5534   }
5535   return nullptr;
5536 }
5537 
5538 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5539 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
  // Shallow-clone the allocation's JVMState so the allocation's own debug
  // info is left untouched; the clone is mutated below.
  JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
  uint size = alloc->req();
  // Build a new safepoint with the cloned state and mirror every input edge
  // of the allocation onto it.
  SafePointNode* sfpt = new SafePointNode(size, old_jvms);
  old_jvms->set_map(sfpt);
  for (uint i = 0; i < size; i++) {
    sfpt->init_req(i, alloc->in(i));
  }
  // re-push array length for deoptimization
  sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
  // The expression stack grew by one slot, so bump sp and shift the monitor,
  // scalar-replacement and end offsets to keep the JVMState layout consistent.
  old_jvms->set_sp(old_jvms->sp()+1);
  old_jvms->set_monoff(old_jvms->monoff()+1);
  old_jvms->set_scloff(old_jvms->scloff()+1);
  old_jvms->set_endoff(old_jvms->endoff()+1);
  // On deoptimization, re-execute the allocation bytecode in the interpreter
  // (using the re-pushed length) instead of resuming after it.
  old_jvms->set_should_reexecute(true);

  // Wire the safepoint to the kit's current I/O, memory and control state.
  sfpt->set_i_o(map()->i_o());
  sfpt->set_memory(map()->memory());
  sfpt->set_control(map()->control());
  return sfpt;
}
5561 
5562 // In case of a deoptimization, we restart execution at the
5563 // allocation, allocating a new array. We would leave an uninitialized
5564 // array in the heap that GCs wouldn't expect. Move the allocation
5565 // after the traps so we don't allocate the array if we
5566 // deoptimize. This is possible because tightly_coupled_allocation()
5567 // guarantees there's no observer of the allocated array at this point
5568 // and the control flow is simple enough.
5569 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5570                                                     int saved_reexecute_sp, uint new_idx) {
5571   if (saved_jvms_before_guards != nullptr && !stopped()) {
5572     replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5573 
5574     assert(alloc != nullptr, "only with a tightly coupled allocation");
5575     // restore JVM state to the state at the arraycopy
5576     saved_jvms_before_guards->map()->set_control(map()->control());
5577     assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5578     assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5579     // If we've improved the types of some nodes (null check) while
5580     // emitting the guards, propagate them to the current state
5581     map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5582     set_jvms(saved_jvms_before_guards);
5583     _reexecute_sp = saved_reexecute_sp;
5584 
5585     // Remove the allocation from above the guards
5586     CallProjections callprojs;
5587     alloc->extract_projections(&callprojs, true);
5588     InitializeNode* init = alloc->initialization();
5589     Node* alloc_mem = alloc->in(TypeFunc::Memory);
5590     C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5591     init->replace_mem_projs_by(alloc_mem, C);
5592 
5593     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5594     // the allocation (i.e. is only valid if the allocation succeeds):
5595     // 1) replace CastIINode with AllocateArrayNode's length here
5596     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5597     //
5598     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
5599     // new separate CastIINode (arraycopy guard checks or any array length use between array allocation and ararycopy)
5600     Node* init_control = init->proj_out(TypeFunc::Control);
5601     Node* alloc_length = alloc->Ideal_length();
5602 #ifdef ASSERT
5603     Node* prev_cast = nullptr;
5604 #endif
5605     for (uint i = 0; i < init_control->outcnt(); i++) {
5606       Node* init_out = init_control->raw_out(i);
5607       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5608 #ifdef ASSERT
5609         if (prev_cast == nullptr) {
5610           prev_cast = init_out;

5612           if (prev_cast->cmp(*init_out) == false) {
5613             prev_cast->dump();
5614             init_out->dump();
5615             assert(false, "not equal CastIINode");
5616           }
5617         }
5618 #endif
5619         C->gvn_replace_by(init_out, alloc_length);
5620       }
5621     }
5622     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5623 
5624     // move the allocation here (after the guards)
5625     _gvn.hash_delete(alloc);
5626     alloc->set_req(TypeFunc::Control, control());
5627     alloc->set_req(TypeFunc::I_O, i_o());
5628     Node *mem = reset_memory();
5629     set_all_memory(mem);
5630     alloc->set_req(TypeFunc::Memory, mem);
5631     set_control(init->proj_out_or_null(TypeFunc::Control));
5632     set_i_o(callprojs.fallthrough_ioproj);
5633 
5634     // Update memory as done in GraphKit::set_output_for_allocation()
5635     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5636     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5637     if (ary_type->isa_aryptr() && length_type != nullptr) {
5638       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5639     }
5640     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5641     int            elemidx  = C->get_alias_index(telemref);
5642     // Need to properly move every memory projection for the Initialize
5643 #ifdef ASSERT
5644     int mark_idx = C->get_alias_index(ary_type->add_offset(oopDesc::mark_offset_in_bytes()));
5645     int klass_idx = C->get_alias_index(ary_type->add_offset(oopDesc::klass_offset_in_bytes()));
5646 #endif
5647     auto move_proj = [&](ProjNode* proj) {
5648       int alias_idx = C->get_alias_index(proj->adr_type());
5649       assert(alias_idx == Compile::AliasIdxRaw ||
5650              alias_idx == elemidx ||
5651              alias_idx == mark_idx ||
5652              alias_idx == klass_idx, "should be raw memory or array element type");

5962         top_src  = src_type->isa_aryptr();
5963         has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5964         src_spec = true;
5965       }
5966       if (!has_dest) {
5967         dest = maybe_cast_profiled_obj(dest, dest_k, true);
5968         dest_type  = _gvn.type(dest);
5969         top_dest  = dest_type->isa_aryptr();
5970         has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5971         dest_spec = true;
5972       }
5973     }
5974   }
5975 
5976   if (has_src && has_dest && can_emit_guards) {
5977     BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5978     BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5979     if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5980     if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5981 
5982     if (src_elem == dest_elem && src_elem == T_OBJECT) {
5983       // If both arrays are object arrays then having the exact types
5984       // for both will remove the need for a subtype check at runtime
5985       // before the call and may make it possible to pick a faster copy
5986       // routine (without a subtype check on every element)
5987       // Do we have the exact type of src?
5988       bool could_have_src = src_spec;
5989       // Do we have the exact type of dest?
5990       bool could_have_dest = dest_spec;
5991       ciKlass* src_k = nullptr;
5992       ciKlass* dest_k = nullptr;
5993       if (!src_spec) {
5994         src_k = src_type->speculative_type_not_null();
5995         if (src_k != nullptr && src_k->is_array_klass()) {
5996           could_have_src = true;
5997         }
5998       }
5999       if (!dest_spec) {
6000         dest_k = dest_type->speculative_type_not_null();
6001         if (dest_k != nullptr && dest_k->is_array_klass()) {
6002           could_have_dest = true;
6003         }
6004       }
6005       if (could_have_src && could_have_dest) {
6006         // If we can have both exact types, emit the missing guards
6007         if (could_have_src && !src_spec) {
6008           src = maybe_cast_profiled_obj(src, src_k, true);


6009         }
6010         if (could_have_dest && !dest_spec) {
6011           dest = maybe_cast_profiled_obj(dest, dest_k, true);


6012         }
6013       }
6014     }
6015   }
6016 
6017   ciMethod* trap_method = method();
6018   int trap_bci = bci();
6019   if (saved_jvms_before_guards != nullptr) {
6020     trap_method = alloc->jvms()->method();
6021     trap_bci = alloc->jvms()->bci();
6022   }
6023 
6024   bool negative_length_guard_generated = false;
6025 
6026   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
6027       can_emit_guards &&
6028       !src->is_top() && !dest->is_top()) {
6029     // validate arguments: enables transformation the ArrayCopyNode
6030     validated = true;
6031 
6032     RegionNode* slow_region = new RegionNode(1);
6033     record_for_igvn(slow_region);
6034 
6035     // (1) src and dest are arrays.
6036     generate_non_array_guard(load_object_klass(src), slow_region, &src);
6037     generate_non_array_guard(load_object_klass(dest), slow_region, &dest);
6038 
6039     // (2) src and dest arrays must have elements of the same BasicType
6040     // done at macro expansion or at Ideal transformation time
6041 
6042     // (4) src_offset must not be negative.
6043     generate_negative_guard(src_offset, slow_region);
6044 
6045     // (5) dest_offset must not be negative.
6046     generate_negative_guard(dest_offset, slow_region);
6047 
6048     // (7) src_offset + length must not exceed length of src.
6049     generate_limit_guard(src_offset, length,
6050                          load_array_length(src),
6051                          slow_region);
6052 
6053     // (8) dest_offset + length must not exceed length of dest.
6054     generate_limit_guard(dest_offset, length,
6055                          load_array_length(dest),
6056                          slow_region);
6057 
6058     // (6) length must not be negative.
6059     // This is also checked in generate_arraycopy() during macro expansion, but
6060     // we also have to check it here for the case where the ArrayCopyNode will
6061     // be eliminated by Escape Analysis.
6062     if (EliminateAllocations) {
6063       generate_negative_guard(length, slow_region);
6064       negative_length_guard_generated = true;
6065     }
6066 
6067     // (9) each element of an oop array must be assignable
6068     Node* dest_klass = load_object_klass(dest);

6069     if (src != dest) {

6070       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
6071 
6072       if (not_subtype_ctrl != top()) {
6073         PreserveJVMState pjvms(this);
6074         set_control(not_subtype_ctrl);
6075         uncommon_trap(Deoptimization::Reason_intrinsic,
6076                       Deoptimization::Action_make_not_entrant);
6077         assert(stopped(), "Should be stopped");
6078       }
6079     }
























6080     {
6081       PreserveJVMState pjvms(this);
6082       set_control(_gvn.transform(slow_region));
6083       uncommon_trap(Deoptimization::Reason_intrinsic,
6084                     Deoptimization::Action_make_not_entrant);
6085       assert(stopped(), "Should be stopped");
6086     }
6087 
6088     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
6089     const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();








6090     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
6091     arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
6092   }
6093 
6094   if (stopped()) {
6095     return true;
6096   }
6097 



6098   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6099                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
6100                                           // so the compiler has a chance to eliminate them: during macro expansion,
6101                                           // we have to set their control (CastPP nodes are eliminated).
6102                                           load_object_klass(src), load_object_klass(dest),
6103                                           load_array_length(src), load_array_length(dest));
6104 
6105   ac->set_arraycopy(validated);
6106 
6107   Node* n = _gvn.transform(ac);
6108   if (n == ac) {
6109     ac->connect_outputs(this);
6110   } else {
6111     assert(validated, "shouldn't transform if all arguments not validated");
6112     set_all_memory(n);
6113   }
6114   clear_upper_avx();
6115 
6116 
6117   return true;
6118 }
6119 
6120 
6121 // Helper function which determines if an arraycopy immediately follows
6122 // an allocation, with no intervening tests or other escapes for the object.

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/macroAssembler.hpp"
  26 #include "ci/ciArrayKlass.hpp"
  27 #include "ci/ciFlatArrayKlass.hpp"
  28 #include "ci/ciInstanceKlass.hpp"
  29 #include "ci/ciSymbols.hpp"
  30 #include "ci/ciUtilities.inline.hpp"
  31 #include "classfile/vmIntrinsics.hpp"
  32 #include "compiler/compileBroker.hpp"
  33 #include "compiler/compileLog.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/c2/barrierSetC2.hpp"
  36 #include "jfr/support/jfrIntrinsics.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "oops/accessDecorators.hpp"
  39 #include "oops/klass.inline.hpp"
  40 #include "oops/layoutKind.hpp"
  41 #include "oops/objArrayKlass.hpp"
  42 #include "opto/addnode.hpp"
  43 #include "opto/arraycopynode.hpp"
  44 #include "opto/c2compiler.hpp"
  45 #include "opto/castnode.hpp"
  46 #include "opto/cfgnode.hpp"
  47 #include "opto/convertnode.hpp"
  48 #include "opto/countbitsnode.hpp"
  49 #include "opto/graphKit.hpp"
  50 #include "opto/idealKit.hpp"
  51 #include "opto/inlinetypenode.hpp"
  52 #include "opto/library_call.hpp"
  53 #include "opto/mathexactnode.hpp"
  54 #include "opto/mulnode.hpp"
  55 #include "opto/narrowptrnode.hpp"
  56 #include "opto/opaquenode.hpp"
  57 #include "opto/opcodes.hpp"
  58 #include "opto/parse.hpp"
  59 #include "opto/rootnode.hpp"
  60 #include "opto/runtime.hpp"
  61 #include "opto/subnode.hpp"
  62 #include "opto/type.hpp"
  63 #include "opto/vectornode.hpp"
  64 #include "prims/jvmtiExport.hpp"
  65 #include "prims/jvmtiThreadState.hpp"
  66 #include "prims/unsafe.hpp"
  67 #include "runtime/globals.hpp"
  68 #include "runtime/jniHandles.inline.hpp"
  69 #include "runtime/mountUnmountDisabler.hpp"
  70 #include "runtime/objectMonitor.hpp"
  71 #include "runtime/sharedRuntime.hpp"
  72 #include "runtime/stubRoutines.hpp"
  73 #include "utilities/globalDefinitions.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/powerOfTwo.hpp"
  76 
  77 //---------------------------make_vm_intrinsic----------------------------
  78 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  79   vmIntrinsicID id = m->intrinsic_id();
  80   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
  81 
  82   if (!m->is_loaded()) {
  83     // Do not attempt to inline unloaded methods.
  84     return nullptr;
  85   }
  86 
  87   C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
  88   bool is_available = false;
  89 
  90   {
  91     // For calling is_intrinsic_supported and is_intrinsic_disabled_by_flag
  92     // the compiler must transition to '_thread_in_vm' state because both
  93     // methods access VM-internal data.

 404   case vmIntrinsics::_getReferenceOpaque:       return inline_unsafe_access(!is_store, T_OBJECT,   Opaque, false);
 405   case vmIntrinsics::_getBooleanOpaque:         return inline_unsafe_access(!is_store, T_BOOLEAN,  Opaque, false);
 406   case vmIntrinsics::_getByteOpaque:            return inline_unsafe_access(!is_store, T_BYTE,     Opaque, false);
 407   case vmIntrinsics::_getShortOpaque:           return inline_unsafe_access(!is_store, T_SHORT,    Opaque, false);
 408   case vmIntrinsics::_getCharOpaque:            return inline_unsafe_access(!is_store, T_CHAR,     Opaque, false);
 409   case vmIntrinsics::_getIntOpaque:             return inline_unsafe_access(!is_store, T_INT,      Opaque, false);
 410   case vmIntrinsics::_getLongOpaque:            return inline_unsafe_access(!is_store, T_LONG,     Opaque, false);
 411   case vmIntrinsics::_getFloatOpaque:           return inline_unsafe_access(!is_store, T_FLOAT,    Opaque, false);
 412   case vmIntrinsics::_getDoubleOpaque:          return inline_unsafe_access(!is_store, T_DOUBLE,   Opaque, false);
 413 
 414   case vmIntrinsics::_putReferenceOpaque:       return inline_unsafe_access( is_store, T_OBJECT,   Opaque, false);
 415   case vmIntrinsics::_putBooleanOpaque:         return inline_unsafe_access( is_store, T_BOOLEAN,  Opaque, false);
 416   case vmIntrinsics::_putByteOpaque:            return inline_unsafe_access( is_store, T_BYTE,     Opaque, false);
 417   case vmIntrinsics::_putShortOpaque:           return inline_unsafe_access( is_store, T_SHORT,    Opaque, false);
 418   case vmIntrinsics::_putCharOpaque:            return inline_unsafe_access( is_store, T_CHAR,     Opaque, false);
 419   case vmIntrinsics::_putIntOpaque:             return inline_unsafe_access( is_store, T_INT,      Opaque, false);
 420   case vmIntrinsics::_putLongOpaque:            return inline_unsafe_access( is_store, T_LONG,     Opaque, false);
 421   case vmIntrinsics::_putFloatOpaque:           return inline_unsafe_access( is_store, T_FLOAT,    Opaque, false);
 422   case vmIntrinsics::_putDoubleOpaque:          return inline_unsafe_access( is_store, T_DOUBLE,   Opaque, false);
 423 
 424   case vmIntrinsics::_getFlatValue:             return inline_unsafe_flat_access(!is_store, Relaxed);
 425   case vmIntrinsics::_putFlatValue:             return inline_unsafe_flat_access( is_store, Relaxed);
 426 
 427   case vmIntrinsics::_compareAndSetReference:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap,      Volatile);
 428   case vmIntrinsics::_compareAndSetByte:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap,      Volatile);
 429   case vmIntrinsics::_compareAndSetShort:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap,      Volatile);
 430   case vmIntrinsics::_compareAndSetInt:         return inline_unsafe_load_store(T_INT,    LS_cmp_swap,      Volatile);
 431   case vmIntrinsics::_compareAndSetLong:        return inline_unsafe_load_store(T_LONG,   LS_cmp_swap,      Volatile);
 432 
 433   case vmIntrinsics::_weakCompareAndSetReferencePlain:     return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Relaxed);
 434   case vmIntrinsics::_weakCompareAndSetReferenceAcquire:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Acquire);
 435   case vmIntrinsics::_weakCompareAndSetReferenceRelease:   return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Release);
 436   case vmIntrinsics::_weakCompareAndSetReference:          return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Volatile);
 437   case vmIntrinsics::_weakCompareAndSetBytePlain:          return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Relaxed);
 438   case vmIntrinsics::_weakCompareAndSetByteAcquire:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Acquire);
 439   case vmIntrinsics::_weakCompareAndSetByteRelease:        return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Release);
 440   case vmIntrinsics::_weakCompareAndSetByte:               return inline_unsafe_load_store(T_BYTE,   LS_cmp_swap_weak, Volatile);
 441   case vmIntrinsics::_weakCompareAndSetShortPlain:         return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Relaxed);
 442   case vmIntrinsics::_weakCompareAndSetShortAcquire:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Acquire);
 443   case vmIntrinsics::_weakCompareAndSetShortRelease:       return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Release);
 444   case vmIntrinsics::_weakCompareAndSetShort:              return inline_unsafe_load_store(T_SHORT,  LS_cmp_swap_weak, Volatile);
 445   case vmIntrinsics::_weakCompareAndSetIntPlain:           return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Relaxed);
 446   case vmIntrinsics::_weakCompareAndSetIntAcquire:         return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Acquire);

 466   case vmIntrinsics::_compareAndExchangeLong:              return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Volatile);
 467   case vmIntrinsics::_compareAndExchangeLongAcquire:       return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Acquire);
 468   case vmIntrinsics::_compareAndExchangeLongRelease:       return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Release);
 469 
 470   case vmIntrinsics::_getAndAddByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_add,       Volatile);
 471   case vmIntrinsics::_getAndAddShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_add,       Volatile);
 472   case vmIntrinsics::_getAndAddInt:                     return inline_unsafe_load_store(T_INT,    LS_get_add,       Volatile);
 473   case vmIntrinsics::_getAndAddLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_add,       Volatile);
 474 
 475   case vmIntrinsics::_getAndSetByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_set,       Volatile);
 476   case vmIntrinsics::_getAndSetShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_set,       Volatile);
 477   case vmIntrinsics::_getAndSetInt:                     return inline_unsafe_load_store(T_INT,    LS_get_set,       Volatile);
 478   case vmIntrinsics::_getAndSetLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_set,       Volatile);
 479   case vmIntrinsics::_getAndSetReference:               return inline_unsafe_load_store(T_OBJECT, LS_get_set,       Volatile);
 480 
 481   case vmIntrinsics::_loadFence:
 482   case vmIntrinsics::_storeFence:
 483   case vmIntrinsics::_storeStoreFence:
 484   case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
 485 
 486   case vmIntrinsics::_arrayInstanceBaseOffset:  return inline_arrayInstanceBaseOffset();
 487   case vmIntrinsics::_arrayInstanceIndexScale:  return inline_arrayInstanceIndexScale();
 488   case vmIntrinsics::_arrayLayout:              return inline_arrayLayout();
 489   case vmIntrinsics::_getFieldMap:              return inline_getFieldMap();
 490 
 491   case vmIntrinsics::_onSpinWait:               return inline_onspinwait();
 492 
 493   case vmIntrinsics::_currentCarrierThread:     return inline_native_currentCarrierThread();
 494   case vmIntrinsics::_currentThread:            return inline_native_currentThread();
 495   case vmIntrinsics::_setCurrentThread:         return inline_native_setCurrentThread();
 496 
 497   case vmIntrinsics::_scopedValueCache:          return inline_native_scopedValueCache();
 498   case vmIntrinsics::_setScopedValueCache:       return inline_native_setScopedValueCache();
 499 
 500   case vmIntrinsics::_Continuation_pin:          return inline_native_Continuation_pinning(false);
 501   case vmIntrinsics::_Continuation_unpin:        return inline_native_Continuation_pinning(true);
 502 
 503   case vmIntrinsics::_vthreadEndFirstTransition:    return inline_native_vthread_end_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_end_first_transition_Java()),
 504                                                                                                 "endFirstTransition", true);
 505   case vmIntrinsics::_vthreadStartFinalTransition:  return inline_native_vthread_start_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_start_final_transition_Java()),
 506                                                                                                   "startFinalTransition", true);
 507   case vmIntrinsics::_vthreadStartTransition:       return inline_native_vthread_start_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_start_transition_Java()),
 508                                                                                                   "startTransition", false);
 509   case vmIntrinsics::_vthreadEndTransition:         return inline_native_vthread_end_transition(CAST_FROM_FN_PTR(address, OptoRuntime::vthread_end_transition_Java()),
 510                                                                                                 "endTransition", false);

 519 #endif
 520   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 521   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 522   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 523   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 524   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 525   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 526   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 527   case vmIntrinsics::_setMemory:                return inline_unsafe_setMemory();
 528   case vmIntrinsics::_getLength:                return inline_native_getLength();
 529   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 530   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 531   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 532   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 533   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 534   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 535   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 536 
 537   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 538   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 539   case vmIntrinsics::_newNullRestrictedNonAtomicArray: return inline_newArray(/* null_free */ true, /* atomic */ false);
 540   case vmIntrinsics::_newNullRestrictedAtomicArray: return inline_newArray(/* null_free */ true, /* atomic */ true);
 541   case vmIntrinsics::_newNullableAtomicArray:     return inline_newArray(/* null_free */ false, /* atomic */ true);
 542   case vmIntrinsics::_isFlatArray:              return inline_getArrayProperties(IsFlat);
 543   case vmIntrinsics::_isNullRestrictedArray:    return inline_getArrayProperties(IsNullRestricted);
 544   case vmIntrinsics::_isAtomicArray:            return inline_getArrayProperties(IsAtomic);
 545 
 546   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 547 
 548   case vmIntrinsics::_isInstance:
 549   case vmIntrinsics::_isHidden:
 550   case vmIntrinsics::_getSuperclass:            return inline_native_Class_query(intrinsic_id());
 551 
 552   case vmIntrinsics::_floatToRawIntBits:
 553   case vmIntrinsics::_floatToIntBits:
 554   case vmIntrinsics::_intBitsToFloat:
 555   case vmIntrinsics::_doubleToRawLongBits:
 556   case vmIntrinsics::_doubleToLongBits:
 557   case vmIntrinsics::_longBitsToDouble:
 558   case vmIntrinsics::_floatToFloat16:
 559   case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());
 560   case vmIntrinsics::_sqrt_float16:             return inline_fp16_operations(intrinsic_id(), 1);
 561   case vmIntrinsics::_fma_float16:              return inline_fp16_operations(intrinsic_id(), 3);
 562   case vmIntrinsics::_floatIsFinite:
 563   case vmIntrinsics::_floatIsInfinite:
 564   case vmIntrinsics::_doubleIsFinite:

2347     case vmIntrinsics::_remainderUnsigned_l: {
2348       zero_check_long(argument(2));
2349       // Compile-time detect of null-exception
2350       if (stopped()) {
2351         return true; // keep the graph constructed so far
2352       }
2353       n = new UModLNode(control(), argument(0), argument(2));
2354       break;
2355     }
2356     default:  fatal_unexpected_iid(id);  break;
2357   }
2358   set_result(_gvn.transform(n));
2359   return true;
2360 }
2361 
2362 //----------------------------inline_unsafe_access----------------------------
2363 
2364 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2365   // Attempt to infer a sharper value type from the offset and base type.
2366   ciKlass* sharpened_klass = nullptr;
2367   bool null_free = false;
2368 
2369   // See if it is an instance field, with an object type.
2370   if (alias_type->field() != nullptr) {
2371     if (alias_type->field()->type()->is_klass()) {
2372       sharpened_klass = alias_type->field()->type()->as_klass();
2373       null_free = alias_type->field()->is_null_free();
2374     }
2375   }
2376 
2377   const TypeOopPtr* result = nullptr;
2378   // See if it is a narrow oop array.
2379   if (adr_type->isa_aryptr()) {
2380     if (adr_type->offset() >= refArrayOopDesc::base_offset_in_bytes()) {
2381       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2382       null_free = adr_type->is_aryptr()->is_null_free();
2383       if (elem_type != nullptr && elem_type->is_loaded()) {
2384         // Sharpen the value type.
2385         result = elem_type;
2386       }
2387     }
2388   }
2389 
2390   // The sharpened class might be unloaded if there is no class loader
2391   // contraint in place.
2392   if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2393     // Sharpen the value type.
2394     result = TypeOopPtr::make_from_klass(sharpened_klass);
2395     if (null_free) {
2396       result = result->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2397     }
2398   }
2399   if (result != nullptr) {
2400 #ifndef PRODUCT
2401     if (C->print_intrinsics() || C->print_inlining()) {
2402       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2403       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2404     }
2405 #endif
2406   }
2407   return result;
2408 }
2409 
2410 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2411   switch (kind) {
2412       case Relaxed:
2413         return MO_UNORDERED;
2414       case Opaque:
2415         return MO_RELAXED;
2416       case Acquire:
2417         return MO_ACQUIRE;

2506 #endif // ASSERT
2507  }
2508 #endif //PRODUCT
2509 
2510   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2511 
2512   Node* receiver = argument(0);  // type: oop
2513 
2514   // Build address expression.
2515   Node* heap_base_oop = top();
2516 
2517   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2518   Node* base = argument(1);  // type: oop
2519   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2520   Node* offset = argument(2);  // type: long
2521   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2522   // to be plain byte offsets, which are also the same as those accepted
2523   // by oopDesc::field_addr.
2524   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2525          "fieldOffset must be byte-scaled");
2526 
2527   if (base->is_InlineType()) {
2528     assert(!is_store, "InlineTypeNodes are non-larval value objects");
2529     InlineTypeNode* vt = base->as_InlineType();
2530     if (offset->is_Con()) {
2531       long off = find_long_con(offset, 0);
2532       ciInlineKlass* vk = vt->type()->inline_klass();
2533       if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2534         return false;
2535       }
2536 
2537       ciField* field = vk->get_non_flat_field_by_offset(off);
2538       if (field != nullptr) {
2539         BasicType bt = type2field[field->type()->basic_type()];
2540         if (bt == T_ARRAY || bt == T_NARROWOOP) {
2541           bt = T_OBJECT;
2542         }
2543         if (bt == type && !field->is_flat()) {
2544           Node* value = vt->field_value_by_offset(off, false);
2545           const Type* value_type = _gvn.type(value);
2546           if (value->is_InlineType()) {
2547             value = value->as_InlineType()->adjust_scalarization_depth(this);
2548           } else if (value_type->is_inlinetypeptr()) {
2549             value = InlineTypeNode::make_from_oop(this, value, value_type->inline_klass());
2550           }
2551           set_result(value);
2552           return true;
2553         }
2554       }
2555     }
2556     {
2557       // Re-execute the unsafe access if allocation triggers deoptimization.
2558       PreserveReexecuteState preexecs(this);
2559       jvms()->set_should_reexecute(true);
2560       vt = vt->buffer(this);
2561     }
2562     base = vt->get_oop();
2563   }
2564 
2565   // 32-bit machines ignore the high half!
2566   offset = ConvL2X(offset);
2567 
2568   // Save state and restore on bailout
2569   SavedState old_state(this);
2570 
2571   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2572   assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
2573 
2574   if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
2575     if (type != T_OBJECT) {
2576       decorators |= IN_NATIVE; // off-heap primitive access
2577     } else {
2578       return false; // off-heap oop accesses are not supported
2579     }
2580   } else {
2581     heap_base_oop = base; // on-heap or mixed access
2582   }
2583 
2584   // Can base be null? Otherwise, always on-heap access.

2588     decorators |= IN_HEAP;
2589   }
2590 
2591   Node* val = is_store ? argument(4) : nullptr;
2592 
2593   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2594   if (adr_type == TypePtr::NULL_PTR) {
2595     return false; // off-heap access with zero address
2596   }
2597 
2598   // Try to categorize the address.
2599   Compile::AliasType* alias_type = C->alias_type(adr_type);
2600   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2601 
2602   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2603       alias_type->adr_type() == TypeAryPtr::RANGE) {
2604     return false; // not supported
2605   }
2606 
2607   bool mismatched = false;
2608   BasicType bt = T_ILLEGAL;
2609   ciField* field = nullptr;
2610   if (adr_type->isa_instptr()) {
2611     const TypeInstPtr* instptr = adr_type->is_instptr();
2612     ciInstanceKlass* k = instptr->instance_klass();
2613     int off = instptr->offset();
2614     if (instptr->const_oop() != nullptr &&
2615         k == ciEnv::current()->Class_klass() &&
2616         instptr->offset() >= (k->size_helper() * wordSize)) {
2617       k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2618       field = k->get_field_by_offset(off, true);
2619     } else {
2620       field = k->get_non_flat_field_by_offset(off);
2621     }
2622     if (field != nullptr) {
2623       bt = type2field[field->type()->basic_type()];
2624     }
2625     if (bt != alias_type->basic_type()) {
2626       // Type mismatch. Is it an access to a nested flat field?
2627       field = k->get_field_by_offset(off, false);
2628       if (field != nullptr) {
2629         bt = type2field[field->type()->basic_type()];
2630       }
2631     }
2632     assert(bt == alias_type->basic_type(), "should match");
2633   } else {
2634     bt = alias_type->basic_type();
2635   }
2636 
2637   if (bt != T_ILLEGAL) {
2638     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2639     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2640       // Alias type doesn't differentiate between byte[] and boolean[]).
2641       // Use address type to get the element type.
2642       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2643     }
2644     if (is_reference_type(bt, true)) {
2645       // accessing an array field with getReference is not a mismatch
2646       bt = T_OBJECT;
2647     }
2648     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2649       // Don't intrinsify mismatched object accesses
2650       return false;
2651     }
2652     mismatched = (bt != type);
2653   } else if (alias_type->adr_type()->isa_oopptr()) {
2654     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2655   }
2656 
2657   old_state.discard();
2658   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2659 
2660   if (mismatched) {
2661     decorators |= C2_MISMATCHED;
2662   }
2663 
2664   // First guess at the value type.
2665   const Type *value_type = Type::get_const_basic_type(type);
2666 
2667   // Figure out the memory ordering.
2668   decorators |= mo_decorator_for_access_kind(kind);
2669 
2670   if (!is_store) {
2671     if (type == T_OBJECT) {
2672       const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2673       if (tjp != nullptr) {
2674         value_type = tjp;
2675       }
2676     }
2677   }
2678 
2679   receiver = null_check(receiver);
2680   if (stopped()) {
2681     return true;
2682   }
2683   // Heap pointers get a null-check from the interpreter,
2684   // as a courtesy.  However, this is not guaranteed by Unsafe,
2685   // and it is not possible to fully distinguish unintended nulls
2686   // from intended ones in this API.
2687 
2688   if (!is_store) {
2689     Node* p = nullptr;
2690     // Try to constant fold a load from a constant field
2691 
2692     if (heap_base_oop != top() && field != nullptr && field->is_constant() && !field->is_flat() && !mismatched) {
2693       // final or stable field
2694       p = make_constant_from_field(field, heap_base_oop);
2695     }
2696 
2697     if (p == nullptr) { // Could not constant fold the load
2698       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2699       const TypeOopPtr* ptr = value_type->make_oopptr();
2700       if (ptr != nullptr && ptr->is_inlinetypeptr()) {
2701         // Load a non-flattened inline type from memory
2702         p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass());
2703       }
2704       // Normalize the value returned by getBoolean in the following cases
2705       if (type == T_BOOLEAN &&
2706           (mismatched ||
2707            heap_base_oop == top() ||                  // - heap_base_oop is null or
2708            (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2709                                                       //   and the unsafe access is made to large offset
2710                                                       //   (i.e., larger than the maximum offset necessary for any
2711                                                       //   field access)
2712             ) {
2713           IdealKit ideal = IdealKit(this);
2714 #define __ ideal.
2715           IdealVariable normalized_result(ideal);
2716           __ declarations_done();
2717           __ set(normalized_result, p);
2718           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2719           __ set(normalized_result, ideal.ConI(1));
2720           ideal.end_if();
2721           final_sync(ideal);
2722           p = __ value(normalized_result);
2723 #undef __

2727       p = gvn().transform(new CastP2XNode(nullptr, p));
2728       p = ConvX2UL(p);
2729     }
2730     // The load node has the control of the preceding MemBarCPUOrder.  All
2731     // following nodes will have the control of the MemBarCPUOrder inserted at
2732     // the end of this method.  So, pushing the load onto the stack at a later
2733     // point is fine.
2734     set_result(p);
2735   } else {
2736     if (bt == T_ADDRESS) {
2737       // Repackage the long as a pointer.
2738       val = ConvL2X(val);
2739       val = gvn().transform(new CastX2PNode(val));
2740     }
2741     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2742   }
2743 
2744   return true;
2745 }
2746 
2747 bool LibraryCallKit::inline_unsafe_flat_access(bool is_store, AccessKind kind) {
2748 #ifdef ASSERT
2749   {
2750     ResourceMark rm;
2751     // Check the signatures.
2752     ciSignature* sig = callee()->signature();
2753     assert(sig->type_at(0)->basic_type() == T_OBJECT, "base should be object, but is %s", type2name(sig->type_at(0)->basic_type()));
2754     assert(sig->type_at(1)->basic_type() == T_LONG, "offset should be long, but is %s", type2name(sig->type_at(1)->basic_type()));
2755     assert(sig->type_at(2)->basic_type() == T_INT, "layout kind should be int, but is %s", type2name(sig->type_at(3)->basic_type()));
2756     assert(sig->type_at(3)->basic_type() == T_OBJECT, "value klass should be object, but is %s", type2name(sig->type_at(4)->basic_type()));
2757     if (is_store) {
2758       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value, but returns %s", type2name(sig->return_type()->basic_type()));
2759       assert(sig->count() == 5, "flat putter should have 5 arguments, but has %d", sig->count());
2760       assert(sig->type_at(4)->basic_type() == T_OBJECT, "put value should be object, but is %s", type2name(sig->type_at(5)->basic_type()));
2761     } else {
2762       assert(sig->return_type()->basic_type() == T_OBJECT, "getter must return an object, but returns %s", type2name(sig->return_type()->basic_type()));
2763       assert(sig->count() == 4, "flat getter should have 4 arguments, but has %d", sig->count());
2764     }
2765  }
2766 #endif // ASSERT
2767 
2768   assert(kind == Relaxed, "Only plain accesses for now");
2769   if (callee()->is_static()) {
2770     // caller must have the capability!
2771     return false;
2772   }
2773   C->set_has_unsafe_access(true);
2774 
2775   const TypeInstPtr* value_klass_node = _gvn.type(argument(5))->isa_instptr();
2776   if (value_klass_node == nullptr || value_klass_node->const_oop() == nullptr) {
2777     // parameter valueType is not a constant
2778     return false;
2779   }
2780   ciType* mirror_type = value_klass_node->const_oop()->as_instance()->java_mirror_type();
2781   if (!mirror_type->is_inlinetype()) {
2782     // Dead code
2783     return false;
2784   }
2785   ciInlineKlass* value_klass = mirror_type->as_inline_klass();
2786 
2787   const TypeInt* layout_type = _gvn.type(argument(4))->isa_int();
2788   if (layout_type == nullptr || !layout_type->is_con()) {
2789     // parameter layoutKind is not a constant
2790     return false;
2791   }
2792   assert(layout_type->get_con() >= static_cast<int>(LayoutKind::REFERENCE) &&
2793          layout_type->get_con() < static_cast<int>(LayoutKind::UNKNOWN),
2794          "invalid layoutKind %d", layout_type->get_con());
2795   LayoutKind layout = static_cast<LayoutKind>(layout_type->get_con());
2796   assert(layout == LayoutKind::REFERENCE || layout == LayoutKind::NULL_FREE_NON_ATOMIC_FLAT ||
2797          layout == LayoutKind::NULL_FREE_ATOMIC_FLAT || layout == LayoutKind::NULLABLE_ATOMIC_FLAT,
2798          "unexpected layoutKind %d", layout_type->get_con());
2799 
2800   null_check(argument(0));
2801   if (stopped()) {
2802     return true;
2803   }
2804 
2805   Node* base = must_be_not_null(argument(1), true);
2806   Node* offset = argument(2);
2807   const Type* base_type = _gvn.type(base);
2808 
2809   Node* ptr;
2810   bool immutable_memory = false;
2811   DecoratorSet decorators = C2_UNSAFE_ACCESS | IN_HEAP | MO_UNORDERED;
2812   if (base_type->isa_instptr()) {
2813     const TypeLong* offset_type = _gvn.type(offset)->isa_long();
2814     if (offset_type == nullptr || !offset_type->is_con()) {
2815       // Offset into a non-array should be a constant
2816       decorators |= C2_MISMATCHED;
2817     } else {
2818       int offset_con = checked_cast<int>(offset_type->get_con());
2819       ciInstanceKlass* base_klass = base_type->is_instptr()->instance_klass();
2820       ciField* field = base_klass->get_non_flat_field_by_offset(offset_con);
2821       if (field == nullptr) {
2822         assert(!base_klass->is_final(), "non-existence field at offset %d of class %s", offset_con, base_klass->name()->as_utf8());
2823         decorators |= C2_MISMATCHED;
2824       } else {
2825         assert(field->type() == value_klass, "field at offset %d of %s is of type %s, but valueType is %s",
2826                offset_con, base_klass->name()->as_utf8(), field->type()->name(), value_klass->name()->as_utf8());
2827         immutable_memory = field->is_strict() && field->is_final();
2828 
2829         if (base->is_InlineType()) {
2830           assert(!is_store, "Cannot store into a non-larval value object");
2831           set_result(base->as_InlineType()->field_value_by_offset(offset_con, false));
2832           return true;
2833         }
2834       }
2835     }
2836 
2837     if (base->is_InlineType()) {
2838       assert(!is_store, "Cannot store into a non-larval value object");
2839       base = base->as_InlineType()->buffer(this, true);
2840     }
2841     ptr = basic_plus_adr(base, ConvL2X(offset));
2842   } else if (base_type->isa_aryptr()) {
2843     decorators |= IS_ARRAY;
2844     if (layout == LayoutKind::REFERENCE) {
2845       if (!base_type->is_aryptr()->is_not_flat()) {
2846         const TypeAryPtr* array_type = base_type->is_aryptr()->cast_to_not_flat();
2847         Node* new_base = _gvn.transform(new CastPPNode(control(), base, array_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing));
2848         replace_in_map(base, new_base);
2849         base = new_base;
2850       }
2851       ptr = basic_plus_adr(base, ConvL2X(offset));
2852     } else {
2853       if (UseArrayFlattening) {
2854         // Flat array must have an exact type
2855         bool is_null_free = !LayoutKindHelper::is_nullable_flat(layout);
2856         bool is_atomic = LayoutKindHelper::is_atomic_flat(layout);
2857         Node* new_base = cast_to_flat_array_exact(base, value_klass, is_null_free, is_atomic);
2858         replace_in_map(base, new_base);
2859         base = new_base;
2860         ptr = basic_plus_adr(base, ConvL2X(offset));
2861         const TypeAryPtr* ptr_type = _gvn.type(ptr)->is_aryptr();
2862         if (ptr_type->field_offset().get() != 0) {
2863           ptr = _gvn.transform(new CastPPNode(control(), ptr, ptr_type->with_field_offset(0), ConstraintCastNode::DependencyType::NonFloatingNarrowing));
2864         }
2865       } else {
2866         uncommon_trap(Deoptimization::Reason_intrinsic,
2867                       Deoptimization::Action_none);
2868         return true;
2869       }
2870     }
2871   } else {
2872     decorators |= C2_MISMATCHED;
2873     ptr = basic_plus_adr(base, ConvL2X(offset));
2874   }
2875 
2876   if (is_store) {
2877     Node* value = argument(6);
2878     const Type* value_type = _gvn.type(value);
2879     if (!value_type->is_inlinetypeptr()) {
2880       value_type = Type::get_const_type(value_klass)->filter_speculative(value_type);
2881       Node* new_value = _gvn.transform(new CastPPNode(control(), value, value_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing));
2882       new_value = InlineTypeNode::make_from_oop(this, new_value, value_klass);
2883       replace_in_map(value, new_value);
2884       value = new_value;
2885     }
2886 
2887     assert(value_type->inline_klass() == value_klass, "value is of type %s while valueType is %s", value_type->inline_klass()->name()->as_utf8(), value_klass->name()->as_utf8());
2888     if (layout == LayoutKind::REFERENCE) {
2889       const TypePtr* ptr_type = (decorators & C2_MISMATCHED) != 0 ? TypeRawPtr::BOTTOM : _gvn.type(ptr)->is_ptr();
2890       access_store_at(base, ptr, ptr_type, value, value_type, T_OBJECT, decorators);
2891     } else {
2892       bool atomic = LayoutKindHelper::is_atomic_flat(layout);
2893       bool null_free = !LayoutKindHelper::is_nullable_flat(layout);
2894       value->as_InlineType()->store_flat(this, base, ptr, atomic, immutable_memory, null_free, decorators);
2895     }
2896 
2897     return true;
2898   } else {
2899     decorators |= (C2_CONTROL_DEPENDENT_LOAD | C2_UNKNOWN_CONTROL_LOAD);
2900     InlineTypeNode* result;
2901     if (layout == LayoutKind::REFERENCE) {
2902       const TypePtr* ptr_type = (decorators & C2_MISMATCHED) != 0 ? TypeRawPtr::BOTTOM : _gvn.type(ptr)->is_ptr();
2903       Node* oop = access_load_at(base, ptr, ptr_type, Type::get_const_type(value_klass), T_OBJECT, decorators);
2904       result = InlineTypeNode::make_from_oop(this, oop, value_klass);
2905     } else {
2906       bool atomic = LayoutKindHelper::is_atomic_flat(layout);
2907       bool null_free = !LayoutKindHelper::is_nullable_flat(layout);
2908       result = InlineTypeNode::make_from_flat(this, value_klass, base, ptr, atomic, immutable_memory, null_free, decorators);
2909     }
2910 
2911     set_result(result);
2912     return true;
2913   }
2914 }
2915 
2916 //----------------------------inline_unsafe_load_store----------------------------
2917 // This method serves a couple of different customers (depending on LoadStoreKind):
2918 //
2919 // LS_cmp_swap:
2920 //
2921 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2922 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2923 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2924 //
2925 // LS_cmp_swap_weak:
2926 //
2927 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2928 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2929 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2930 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2931 //
2932 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);
2933 //   boolean weakCompareAndSetIntPlain(     Object o, long offset, int    expected, int    x);
2934 //   boolean weakCompareAndSetIntAcquire(   Object o, long offset, int    expected, int    x);
2935 //   boolean weakCompareAndSetIntRelease(   Object o, long offset, int    expected, int    x);

3098     }
3099     case LS_cmp_swap:
3100     case LS_cmp_swap_weak:
3101     case LS_get_add:
3102       break;
3103     default:
3104       ShouldNotReachHere();
3105   }
3106 
3107   // Null check receiver.
3108   receiver = null_check(receiver);
3109   if (stopped()) {
3110     return true;
3111   }
3112 
3113   int alias_idx = C->get_alias_index(adr_type);
3114 
3115   if (is_reference_type(type)) {
3116     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
3117 
3118     if (oldval != nullptr && oldval->is_InlineType()) {
3119       // Re-execute the unsafe access if allocation triggers deoptimization.
3120       PreserveReexecuteState preexecs(this);
3121       jvms()->set_should_reexecute(true);
3122       oldval = oldval->as_InlineType()->buffer(this)->get_oop();
3123     }
3124     if (newval != nullptr && newval->is_InlineType()) {
3125       // Re-execute the unsafe access if allocation triggers deoptimization.
3126       PreserveReexecuteState preexecs(this);
3127       jvms()->set_should_reexecute(true);
3128       newval = newval->as_InlineType()->buffer(this)->get_oop();
3129     }
3130 
3131     // Transformation of a value which could be null pointer (CastPP #null)
3132     // could be delayed during Parse (for example, in adjust_map_after_if()).
3133     // Execute transformation here to avoid barrier generation in such case.
3134     if (_gvn.type(newval) == TypePtr::NULL_PTR)
3135       newval = _gvn.makecon(TypePtr::NULL_PTR);
3136 
3137     if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
3138       // Refine the value to a null constant, when it is known to be null
3139       oldval = _gvn.makecon(TypePtr::NULL_PTR);
3140     }
3141   }
3142 
3143   Node* result = nullptr;
3144   switch (kind) {
3145     case LS_cmp_exchange: {
3146       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
3147                                             oldval, newval, value_type, type, decorators);
3148       break;
3149     }
3150     case LS_cmp_swap_weak:

3179   insert_mem_bar(Op_MemBarCPUOrder);
3180   switch(id) {
3181     case vmIntrinsics::_loadFence:
3182       insert_mem_bar(Op_LoadFence);
3183       return true;
3184     case vmIntrinsics::_storeFence:
3185       insert_mem_bar(Op_StoreFence);
3186       return true;
3187     case vmIntrinsics::_storeStoreFence:
3188       insert_mem_bar(Op_StoreStoreFence);
3189       return true;
3190     case vmIntrinsics::_fullFence:
3191       insert_mem_bar(Op_MemBarVolatile);
3192       return true;
3193     default:
3194       fatal_unexpected_iid(id);
3195       return false;
3196   }
3197 }
3198 
3199 // private native int arrayInstanceBaseOffset0(Object[] array);
3200 bool LibraryCallKit::inline_arrayInstanceBaseOffset() {
3201   Node* array = argument(1);
3202   Node* klass_node = load_object_klass(array);
3203 
3204   jint  layout_con = Klass::_lh_neutral_value;
3205   Node* layout_val = get_layout_helper(klass_node, layout_con);
3206   int   layout_is_con = (layout_val == nullptr);
3207 
3208   Node* header_size = nullptr;
3209   if (layout_is_con) {
3210     int hsize = Klass::layout_helper_header_size(layout_con);
3211     header_size = intcon(hsize);
3212   } else {
3213     Node* hss = intcon(Klass::_lh_header_size_shift);
3214     Node* hsm = intcon(Klass::_lh_header_size_mask);
3215     header_size = _gvn.transform(new URShiftINode(layout_val, hss));
3216     header_size = _gvn.transform(new AndINode(header_size, hsm));
3217   }
3218   set_result(header_size);
3219   return true;
3220 }
3221 
3222 // private native int arrayInstanceIndexScale0(Object[] array);
3223 bool LibraryCallKit::inline_arrayInstanceIndexScale() {
3224   Node* array = argument(1);
3225   Node* klass_node = load_object_klass(array);
3226 
3227   jint  layout_con = Klass::_lh_neutral_value;
3228   Node* layout_val = get_layout_helper(klass_node, layout_con);
3229   int   layout_is_con = (layout_val == nullptr);
3230 
3231   Node* element_size = nullptr;
3232   if (layout_is_con) {
3233     int log_element_size  = Klass::layout_helper_log2_element_size(layout_con);
3234     int elem_size = 1 << log_element_size;
3235     element_size = intcon(elem_size);
3236   } else {
3237     Node* ess = intcon(Klass::_lh_log2_element_size_shift);
3238     Node* esm = intcon(Klass::_lh_log2_element_size_mask);
3239     Node* log_element_size = _gvn.transform(new URShiftINode(layout_val, ess));
3240     log_element_size = _gvn.transform(new AndINode(log_element_size, esm));
3241     element_size = _gvn.transform(new LShiftINode(intcon(1), log_element_size));
3242   }
3243   set_result(element_size);
3244   return true;
3245 }
3246 
// private native int arrayLayout0(Object[] array);
// Returns the layout kind of the given array. Reference arrays take the
// guarded path and report LayoutKind::REFERENCE; on the fall-through path
// the layout kind is loaded from the FlatArrayKlass.
bool LibraryCallKit::inline_arrayLayout() {
  RegionNode* region = new RegionNode(2);
  Node* phi = new PhiNode(region, TypeInt::POS);

  Node* array = argument(1);
  Node* klass_node = load_object_klass(array);
  // May add a path onto 'region' for the reference-array case.
  generate_refArray_guard(klass_node, region);
  if (region->req() == 3) {
    // A guard was added: on that path the layout is simply REFERENCE.
    phi->add_req(intcon((jint)LayoutKind::REFERENCE));
  }

  // Fall-through path: load the layout kind field from the array klass.
  int layout_kind_offset = in_bytes(FlatArrayKlass::layout_kind_offset());
  Node* layout_kind_addr = basic_plus_adr(top(), klass_node, layout_kind_offset);
  Node* layout_kind = make_load(nullptr, layout_kind_addr, TypeInt::POS, T_INT, MemNode::unordered);

  region->init_req(1, control());
  phi->init_req(1, layout_kind);

  set_control(_gvn.transform(region));
  set_result(_gvn.transform(phi));
  return true;
}
3270 
// private native int[] getFieldMap0(Class <?> c);
//   int offset = c._klass._acmp_maps_offset;
//   return (int[])c.obj_field(offset);
// Loads the int[] field map stored as an oop field in the mirror of the
// given class, at a per-klass offset recorded in the InstanceKlass.
bool LibraryCallKit::inline_getFieldMap() {
  Node* mirror = argument(1);
  // Load the Klass* from the mirror (no region: no primitive/null path here).
  Node* klass = load_klass_from_mirror(mirror, false, nullptr, 0);

  // Load the offset (within the mirror) at which the field map lives.
  int field_map_offset_offset = in_bytes(InstanceKlass::acmp_maps_offset_offset());
  Node* field_map_offset_addr = basic_plus_adr(top(), klass, field_map_offset_offset);
  Node* field_map_offset = make_load(nullptr, field_map_offset_addr, TypeInt::INT, T_INT, MemNode::unordered);
  field_map_offset = _gvn.transform(ConvI2L(field_map_offset));

  // Address of the int[] oop field inside the mirror, then a GC-aware load.
  Node* map_addr = basic_plus_adr(mirror, field_map_offset);
  const TypeAryPtr* val_type = TypeAryPtr::INTS->cast_to_ptr_type(TypePtr::NotNull)->with_offset(0);
  // TODO 8350865 Remove this
  val_type = val_type->cast_to_not_flat(true)->cast_to_not_null_free(true);
  Node* map = access_load_at(mirror, map_addr, TypeAryPtr::INTS, val_type, T_ARRAY, IN_HEAP | MO_UNORDERED);

  set_result(map);
  return true;
}
3292 
// Implements Thread.onSpinWait() by emitting an OnSpinWait node, which
// backends may lower to a spin-wait hint instruction.
bool LibraryCallKit::inline_onspinwait() {
  insert_mem_bar(Op_OnSpinWait);
  return true;
}
3297 
3298 bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
3299   if (!kls->is_Con()) {
3300     return true;
3301   }
3302   const TypeInstKlassPtr* klsptr = kls->bottom_type()->isa_instklassptr();
3303   if (klsptr == nullptr) {
3304     return true;
3305   }
3306   ciInstanceKlass* ik = klsptr->instance_klass();
3307   // don't need a guard for a klass that is already initialized
3308   return !ik->is_initialized();
3309 }
3310 
3311 //----------------------------inline_unsafe_writeback0-------------------------
3312 // public native void Unsafe.writeback0(long address)

3391                     Deoptimization::Action_make_not_entrant);
3392     }
3393     if (stopped()) {
3394       return true;
3395     }
3396 #endif //INCLUDE_JVMTI
3397 
3398   Node* test = nullptr;
3399   if (LibraryCallKit::klass_needs_init_guard(kls)) {
3400     // Note:  The argument might still be an illegal value like
3401     // Serializable.class or Object[].class.   The runtime will handle it.
3402     // But we must make an explicit check for initialization.
3403     Node* insp = basic_plus_adr(top(), kls, in_bytes(InstanceKlass::init_state_offset()));
3404     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3405     // can generate code to load it as unsigned byte.
3406     Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
3407     Node* bits = intcon(InstanceKlass::fully_initialized);
3408     test = _gvn.transform(new SubINode(inst, bits));
3409     // The 'test' is non-zero if we need to take a slow path.
3410   }
3411   Node* obj = nullptr;
3412   const TypeInstKlassPtr* tkls = _gvn.type(kls)->isa_instklassptr();
3413   if (tkls != nullptr && tkls->instance_klass()->is_inlinetype()) {
3414     obj = InlineTypeNode::make_all_zero(_gvn, tkls->instance_klass()->as_inline_klass())->buffer(this);
3415   } else {
3416     obj = new_instance(kls, test);
3417   }
3418   set_result(obj);
3419   return true;
3420 }
3421 
3422 //------------------------inline_native_time_funcs--------------
3423 // inline code for System.currentTimeMillis() and System.nanoTime()
3424 // these have the same type and signature
3425 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3426   const TypeFunc* tf = OptoRuntime::void_long_Type();
3427   const TypePtr* no_memory_effects = nullptr;
3428   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3429   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3430 #ifdef ASSERT
3431   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3432   assert(value_top == top(), "second value must be top");
3433 #endif
3434   set_result(value);
3435   return true;
3436 }
3437 

4212   Node* thread = _gvn.transform(new ThreadLocalNode());
4213   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
4214   Node* thread_obj_handle
4215     = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
4216   thread_obj_handle = _gvn.transform(thread_obj_handle);
4217   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
4218   access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
4219 
4220   // Change the _monitor_owner_id of the JavaThread
4221   Node* tid = load_field_from_object(arr, "tid", "J");
4222   Node* monitor_owner_id_offset = basic_plus_adr(top(), thread, in_bytes(JavaThread::monitor_owner_id_offset()));
4223   store_to_memory(control(), monitor_owner_id_offset, tid, T_LONG, MemNode::unordered, true);
4224 
4225   JFR_ONLY(extend_setCurrentThread(thread, arr);)
4226   return true;
4227 }
4228 
4229 const Type* LibraryCallKit::scopedValueCache_type() {
4230   ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
4231   const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
4232   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS, /* stable= */ false, /* flat= */ false, /* not_flat= */ true, /* not_null_free= */ true, true);
4233 
4234   // Because we create the scopedValue cache lazily we have to make the
4235   // type of the result BotPTR.
4236   bool xk = etype->klass_is_exact();
4237   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, TypeAryPtr::Offset(0));
4238   return objects_type;
4239 }
4240 
4241 Node* LibraryCallKit::scopedValueCache_helper() {
4242   Node* thread = _gvn.transform(new ThreadLocalNode());
4243   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
4244   // We cannot use immutable_memory() because we might flip onto a
4245   // different carrier thread, at which point we'll need to use that
4246   // carrier thread's cache.
4247   // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
4248   //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
4249   return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
4250 }
4251 
4252 //------------------------inline_native_scopedValueCache------------------
4253 bool LibraryCallKit::inline_native_scopedValueCache() {
4254   Node* cache_obj_handle = scopedValueCache_helper();
4255   const Type* objects_type = scopedValueCache_type();
4256   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
4257 

4393   }
4394   return kls;
4395 }
4396 
4397 //--------------------(inline_native_Class_query helpers)---------------------
4398 // Use this for JVM_ACC_INTERFACE.
4399 // Fall through if (mods & mask) == bits, take the guard otherwise.
4400 Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region,
4401                                                  ByteSize offset, const Type* type, BasicType bt) {
4402   // Branch around if the given klass has the given modifier bit set.
4403   // Like generate_guard, adds a new path onto the region.
4404   Node* modp = basic_plus_adr(top(), kls, in_bytes(offset));
4405   Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
4406   Node* mask = intcon(modifier_mask);
4407   Node* bits = intcon(modifier_bits);
4408   Node* mbit = _gvn.transform(new AndINode(mods, mask));
4409   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
4410   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
4411   return generate_fair_guard(bol, region);
4412 }
4413 
// Adds a path onto 'region' that is taken when 'kls' is an interface;
// falls through otherwise. (See generate_klass_flags_guard.)
Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
  return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
                                    InstanceKlass::access_flags_offset(), TypeInt::CHAR, T_CHAR);
}
4418 
// Use this for testing if Klass is_hidden, has_finalizer, and is_cloneable_fast.
// Takes the guard if (misc_flags & modifier_mask) != modifier_bits and falls
// through otherwise; the misc flags are loaded as an unsigned byte.
Node* LibraryCallKit::generate_misc_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
  return generate_klass_flags_guard(kls, modifier_mask, modifier_bits, region,
                                    Klass::misc_flags_offset(), TypeInt::UBYTE, T_BOOLEAN);
}
4424 
// Adds a path onto 'region' that is taken when 'kls' is a hidden class;
// falls through otherwise.
Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
  return generate_misc_flags_guard(kls, KlassFlags::_misc_is_hidden_class, 0, region);
}
4428 
4429 //-------------------------inline_native_Class_query-------------------
4430 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
4431   const Type* return_type = TypeInt::BOOL;
4432   Node* prim_return_value = top();  // what happens if it's a primitive class?
4433   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);

4519 
4520 
4521   case vmIntrinsics::_getSuperclass:
4522     // The rules here are somewhat unfortunate, but we can still do better
4523     // with random logic than with a JNI call.
4524     // Interfaces store null or Object as _super, but must report null.
4525     // Arrays store an intermediate super as _super, but must report Object.
4526     // Other types can report the actual _super.
4527     // (To verify this code sequence, check the asserts in JVM_IsInterface.)
4528     if (generate_array_guard(kls, region) != nullptr) {
4529       // A guard was added.  If the guard is taken, it was an array.
4530       phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
4531     }
4532     // Check for interface after array since this checks AccessFlags offset into InstanceKlass.
4533     // In other words, we are accessing subtype-specific information, so we need to determine the subtype first.
4534     if (generate_interface_guard(kls, region) != nullptr) {
4535       // A guard was added.  If the guard is taken, it was an interface.
4536       phi->add_req(null());
4537     }
4538     // If we fall through, it's a plain class.  Get its _super.









4539     if (!stopped()) {
4540       p = basic_plus_adr(top(), kls, in_bytes(Klass::super_offset()));
4541       kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
4542       null_ctl = top();
4543       kls = null_check_oop(kls, &null_ctl);
4544       if (null_ctl != top()) {
4545         // If the guard is taken, Object.superClass is null (both klass and mirror).
4546         region->add_req(null_ctl);
4547         phi   ->add_req(null());
4548       }
4549       if (!stopped()) {
4550         query_value = load_mirror_from_klass(kls);
4551       }
4552     }
4553     break;
4554 
4555   default:
4556     fatal_unexpected_iid(id);
4557     break;
4558   }
4559 
4560   // Fall-through is the normal case of a query to a real class.
4561   phi->init_req(1, query_value);
4562   region->init_req(1, control());
4563 
4564   C->set_has_split_ifs(true); // Has chance for split-if optimization
4565   set_result(region, phi);
4566   return true;
4567 }
4568 
4569 
//-------------------------inline_Class_cast-------------------
// Intrinsic for Class.cast(Object). Tries to fold the cast statically;
// otherwise emits the same dynamic checks as the checkcast bytecode and
// deopts (to let the interpreter throw ClassCastException) on failure.
bool LibraryCallKit::inline_Class_cast() {
  Node* mirror = argument(0); // Class
  Node* obj    = argument(1);
  const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
  if (mirror_con == nullptr) {
    return false;  // dead path (mirror->is_top()).
  }
  if (obj == nullptr || obj->is_top()) {
    return false;  // dead path
  }
  const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();

  // First, see if Class.cast() can be folded statically.
  // java_mirror_type() returns non-null for compile-time Class constants.
  ciType* tm = mirror_con->java_mirror_type();
  if (tm != nullptr && tm->is_klass() &&
      tp != nullptr) {
    if (!tp->is_loaded()) {
      // Don't use intrinsic when class is not loaded.
      return false;
    } else {
      const TypeKlassPtr* tklass = TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces);
      int static_res = C->static_subtype_check(tklass, tp->as_klass_type());
      if (static_res == Compile::SSC_always_true) {
        // isInstance() is true - fold the code.
        set_result(obj);
        return true;
      } else if (static_res == Compile::SSC_always_false) {
        // Don't use intrinsic, have to throw ClassCastException.
        // If the reference is null, the non-intrinsic bytecode will
        // be optimized appropriately.
        return false;
      }
    }
  }

  // Bailout intrinsic and do normal inlining if exception path is frequent.
  if (too_many_traps(Deoptimization::Reason_intrinsic)) {
    return false;
  }

  // Generate dynamic checks.
  // Class.cast() is java implementation of _checkcast bytecode.
  // Do checkcast (Parse::do_checkcast()) optimizations here.

  mirror = null_check(mirror);
  // If mirror is dead, only null-path is taken.
  if (stopped()) {
    return true;
  }

  // Not-subtype or the mirror's klass ptr is nullptr (in case it is a primitive).
  enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
  RegionNode* region = new RegionNode(PATH_LIMIT);
  record_for_igvn(region);

  // Now load the mirror's klass metaobject, and null-check it.
  // If kls is null, we have a primitive mirror and
  // nothing is an instance of a primitive type.
  Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);

  Node* res = top();
  // Remember the JVM state so the trap path below can restore it:
  // gen_checkcast may change IO/memory when buffering inline types.
  Node* io = i_o();
  Node* mem = merged_memory();
  if (!stopped()) {

    Node* bad_type_ctrl = top();
    // Do checkcast optimizations.
    res = gen_checkcast(obj, kls, &bad_type_ctrl);
    region->init_req(_bad_type_path, bad_type_ctrl);
  }
  if (region->in(_prim_path) != top() ||
      region->in(_bad_type_path) != top() ||
      region->in(_npe_path) != top()) {
    // Let Interpreter throw ClassCastException.
    PreserveJVMState pjvms(this);
    set_control(_gvn.transform(region));
    // Set IO and memory because gen_checkcast may override them when buffering inline types
    set_i_o(io);
    set_all_memory(mem);
    uncommon_trap(Deoptimization::Reason_intrinsic,
                  Deoptimization::Action_maybe_recompile);
  }
  if (!stopped()) {
    set_result(res);
  }
  return true;
}
4659 
4660 
//--------------------------inline_native_subtype_check------------------------
// This intrinsic takes the JNI calls out of the heart of
// UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
bool LibraryCallKit::inline_native_subtype_check() {
  // Pull both arguments off the stack.
  Node* args[2];                // two java.lang.Class mirrors: superc, subc
  args[0] = argument(0);
  args[1] = argument(1);
  Node* klasses[2];             // corresponding Klasses: superk, subk
  klasses[0] = klasses[1] = top();

  enum {
    // A full decision tree on {superc is prim, subc is prim}:
    _prim_0_path = 1,           // {P,N} => false
                                // {P,P} & superc!=subc => false
    _prim_same_path,            // {P,P} & superc==subc => true
    _prim_1_path,               // {N,P} => false
    _ref_subtype_path,          // {N,N} & subtype check wins => true
    _both_ref_path,             // {N,N} & subtype check loses => false
    PATH_LIMIT
  };

  RegionNode* region = new RegionNode(PATH_LIMIT);
  RegionNode* prim_region = new RegionNode(2);
  Node*       phi    = new PhiNode(region, TypeInt::BOOL);
  record_for_igvn(region);
  record_for_igvn(prim_region);

  const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
  const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
  int class_klass_offset = java_lang_Class::klass_offset();

  // First null-check both mirrors and load each mirror's klass metaobject.
  int which_arg;
  for (which_arg = 0; which_arg <= 1; which_arg++) {
    Node* arg = args[which_arg];
    arg = null_check(arg);
    if (stopped())  break;
    args[which_arg] = arg;

    Node* p = basic_plus_adr(arg, class_klass_offset);
    Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
    klasses[which_arg] = _gvn.transform(kls);
  }

  // Having loaded both klasses, test each for null.
  // A null klass means the mirror denotes a primitive type.
  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
  for (which_arg = 0; which_arg <= 1; which_arg++) {
    Node* kls = klasses[which_arg];
    Node* null_ctl = top();
    kls = null_check_oop(kls, &null_ctl, never_see_null);
    if (which_arg == 0) {
      prim_region->init_req(1, null_ctl);
    } else {
      region->init_req(_prim_1_path, null_ctl);
    }
    if (stopped())  break;
    klasses[which_arg] = kls;
  }

  if (!stopped()) {
    // now we have two reference types, in klasses[0..1]
    Node* subk   = klasses[1];  // the argument to isAssignableFrom
    Node* superk = klasses[0];  // the receiver
    region->set_req(_both_ref_path, gen_subtype_check(subk, superk));

    // Fall-through from the subtype check means the check succeeded.
    region->set_req(_ref_subtype_path, control());
  }

  // If both operands are primitive (both klasses null), then
  // we must return true when they are identical primitives.
  // It is convenient to test this after the first null klass check.
  // This path is also used if superc is a value mirror.
  set_control(_gvn.transform(prim_region));
  if (!stopped()) {
    // Since superc is primitive, make a guard for the superc==subc case.
    Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
    Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
    generate_fair_guard(bol_eq, region);
    if (region->req() == PATH_LIMIT+1) {
      // A guard was added.  If the added guard is taken, superc==subc.
      region->swap_edges(PATH_LIMIT, _prim_same_path);
      region->del_req(PATH_LIMIT);
    }
    region->set_req(_prim_0_path, control()); // Not equal after all.
  }

  // these are the only paths that produce 'true':
  phi->set_req(_prim_same_path,   intcon(1));
  phi->set_req(_ref_subtype_path, intcon(1));

  // pull together the cases:
  assert(region->req() == PATH_LIMIT, "sane region");
  for (uint i = 1; i < region->req(); i++) {
    Node* ctl = region->in(i);
    if (ctl == nullptr || ctl == top()) {
      region->set_req(i, top());
      phi   ->set_req(i, top());
    } else if (phi->in(i) == nullptr) {
      phi->set_req(i, intcon(0)); // all other paths produce 'false'
    }
  }

  set_control(_gvn.transform(region));
  set_result(_gvn.transform(phi));
  return true;
}
4767 
//---------------------generate_array_guard_common------------------------
// Emits a guard on the layout helper of 'kls' testing the array property
// described by 'kind'. Like generate_guard, adds a new path onto 'region'
// when the guard is taken and falls through otherwise. Returns the taken
// control path, or nullptr when the answer is statically "never a branch".
Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind, Node** obj) {

  if (stopped()) {
    return nullptr;
  }

  // Like generate_guard, adds a new path onto the region.
  jint  layout_con = 0;
  Node* layout_val = get_layout_helper(kls, layout_con);
  if (layout_val == nullptr) {
    // Layout helper is a compile-time constant: decide statically.
    bool query = 0;
    switch(kind) {
      case RefArray:       query = Klass::layout_helper_is_refArray(layout_con); break;
      case NonRefArray:    query = !Klass::layout_helper_is_refArray(layout_con); break;
      case TypeArray:      query = Klass::layout_helper_is_typeArray(layout_con); break;
      case AnyArray:       query = Klass::layout_helper_is_array(layout_con); break;
      case NonArray:       query = !Klass::layout_helper_is_array(layout_con); break;
      default:
        ShouldNotReachHere();
    }
    if (!query) {
      return nullptr;                       // never a branch
    } else {                             // always a branch
      Node* always_branch = control();
      if (region != nullptr)
        region->add_req(always_branch);
      set_control(top());
      return always_branch;
    }
  }
  // Layout helper not constant: test the relevant condition at runtime.
  unsigned int value = 0;
  BoolTest::mask btest = BoolTest::illegal;
  switch(kind) {
    case RefArray:
    case NonRefArray: {
      // Extract the array tag by shifting, then compare against the ref tag.
      value = Klass::_lh_array_tag_ref_value;
      layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
      btest = (kind == RefArray) ? BoolTest::eq : BoolTest::ne;
      break;
    }
    case TypeArray: {
      value = Klass::_lh_array_tag_type_value;
      layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
      btest = BoolTest::eq;
      break;
    }
    // Arrays are distinguished by layout_helper compared against
    // _lh_neutral_value (lt => array, gt => non-array).
    case AnyArray:    value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
    case NonArray:    value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
    default:
      ShouldNotReachHere();
  }
  // Now test the correct condition.
  jint nval = (jint)value;
  Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
  Node* bol = _gvn.transform(new BoolNode(cmp, btest));
  Node* ctrl = generate_fair_guard(bol, region);
  // For NonArray the fall-through (current control) is the array side;
  // otherwise the guarded path is.
  Node* is_array_ctrl = kind == NonArray ? control() : ctrl;
  if (obj != nullptr && is_array_ctrl != nullptr && is_array_ctrl != top()) {
    // Keep track of the fact that 'obj' is an array to prevent
    // array specific accesses from floating above the guard.
    *obj = _gvn.transform(new CastPPNode(is_array_ctrl, *obj, TypeAryPtr::BOTTOM));
  }
  return ctrl;
}
4833 
// public static native Object[] ValueClass::newNullRestrictedAtomicArray(Class<?> componentType, int length, Object initVal);
// public static native Object[] ValueClass::newNullRestrictedNonAtomicArray(Class<?> componentType, int length, Object initVal);
// public static native Object[] ValueClass::newNullableAtomicArray(Class<?> componentType, int length);
// Inlines allocation of a value-class array when the component type is a
// compile-time constant inline type whose klass is loaded and initialized;
// otherwise returns false so the native method is called instead.
bool LibraryCallKit::inline_newArray(bool null_free, bool atomic) {
  assert(null_free || atomic, "nullable implies atomic");
  Node* componentType = argument(0);
  Node* length = argument(1);
  // Only the null-restricted variants take an initial value argument.
  Node* init_val = null_free ? argument(2) : nullptr;

  const TypeInstPtr* tp = _gvn.type(componentType)->isa_instptr();
  if (tp != nullptr) {
    ciInstanceKlass* ik = tp->instance_klass();
    if (ik == C->env()->Class_klass()) {
      // componentType is a constant Class mirror; get the mirrored type.
      ciType* t = tp->java_mirror_type();
      if (t != nullptr && t->is_inlinetype()) {

        ciArrayKlass* array_klass = ciArrayKlass::make(t, null_free, atomic, true);
        assert(array_klass->is_elem_null_free() == null_free, "inconsistency");

        // TODO 8350865 ZGC needs card marks on initializing oop stores
        if (UseZGC && null_free && !array_klass->is_flat_array_klass()) {
          return false;
        }

        if (array_klass->is_loaded() && array_klass->element_klass()->as_inline_klass()->is_initialized()) {
          const TypeAryKlassPtr* array_klass_type = TypeAryKlassPtr::make(array_klass, Type::trust_interfaces);
          if (null_free) {
            if (init_val->is_InlineType()) {
              if (array_klass_type->is_flat() && init_val->as_InlineType()->is_all_zero(&gvn(), /* flat */ true)) {
                // Zeroing is enough because the init value is the all-zero value
                init_val = nullptr;
              } else {
                // Make sure the init value is buffered (heap allocated).
                init_val = init_val->as_InlineType()->buffer(this);
              }
            }
            // TODO 8350865 Should we add a check of the init_val type (maybe in debug only + halt)?
            // If we insert a checkcast here, we can be sure that init_val is an InlineTypeNode, so
            // when we folded a field load from an allocation (e.g. during escape analysis), we can
            // remove the check init_val->is_InlineType().
          }
          Node* obj = new_array(makecon(array_klass_type), length, 0, nullptr, false, init_val);
          const TypeAryPtr* arytype = gvn().type(obj)->is_aryptr();
          assert(arytype->is_null_free() == null_free, "inconsistency");
          assert(arytype->is_not_null_free() == !null_free, "inconsistency");
          set_result(obj);
          return true;
        }
      }
    }
  }
  return false;
}
4886 
// public static native boolean ValueClass::isFlatArray(Object array);
// public static native boolean ValueClass::isNullRestrictedArray(Object array);
// public static native boolean ValueClass::isAtomicArray(Object array);
// Emits the requested array-property test and materializes the resulting
// Bool as a 0/1 int via CMove. Returns false (use the native call) for
// checks that are not implemented yet.
bool LibraryCallKit::inline_getArrayProperties(ArrayPropertiesCheck check) {
  Node* array = argument(0);

  Node* bol;
  switch(check) {
    case IsFlat:
      // TODO 8350865 Use the object version here instead of loading the klass
      // The problem is that PhaseMacroExpand::expand_flatarraycheck_node can only handle some IR shapes and will fail, for example, if the bol is directly wired to a ReturnNode
      bol = flat_array_test(load_object_klass(array));
      break;
    case IsNullRestricted:
      bol = null_free_array_test(array);
      break;
    case IsAtomic:
      // TODO 8350865 Implement this. It's a bit more complicated, see conditions in JVM_IsAtomicArray
      // Enable TestIntrinsics::test87/88 once this is implemented
      // bol = null_free_atomic_array_test
      return false;
    default:
      ShouldNotReachHere();
  }

  // Materialize the test result as an int in {0, 1}.
  Node* res = gvn().transform(new CMoveINode(bol, intcon(0), intcon(1), TypeInt::BOOL));
  set_result(res);
  return true;
}
4916 
// Load the default refined array klass from an ObjArrayKlass. This relies on the first entry in the
// '_next_refined_array_klass' linked list being the default (see ObjArrayKlass::klass_with_properties).
Node* LibraryCallKit::load_default_refined_array_klass(Node* klass_node, bool type_array_guard) {
  RegionNode* region = new RegionNode(2);
  Node* phi = new PhiNode(region, TypeInstKlassPtr::OBJECT_OR_NULL);

  if (type_array_guard) {
    // For type arrays, skip the refined-klass load and keep the klass as-is.
    generate_typeArray_guard(klass_node, region);
    if (region->req() == 3) {
      // A guard was added: on that path, use the original klass.
      phi->add_req(klass_node);
    }
  }
  // Fall-through path: load the first refined klass from the linked list.
  Node* adr_refined_klass = basic_plus_adr(top(), klass_node, in_bytes(ObjArrayKlass::next_refined_array_klass_offset()));
  Node* refined_klass = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), adr_refined_klass, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));

  // Can be null if not initialized yet, just deopt
  Node* null_ctl = top();
  refined_klass = null_check_oop(refined_klass, &null_ctl, /* never_see_null= */ true);

  region->init_req(1, control());
  phi->init_req(1, refined_klass);

  set_control(_gvn.transform(region));
  return _gvn.transform(phi);
}
4942 
// Load the non-refined array klass from an ObjArrayKlass.
Node* LibraryCallKit::load_non_refined_array_klass(Node* klass_node) {
  const TypeAryKlassPtr* ary_klass_ptr = _gvn.type(klass_node)->isa_aryklassptr();
  if (ary_klass_ptr != nullptr && ary_klass_ptr->klass_is_exact()) {
    // Exact klass known at compile time: fold to a constant.
    return _gvn.makecon(ary_klass_ptr->cast_to_refined_array_klass_ptr(false));
  }

  RegionNode* region = new RegionNode(2);
  Node* phi = new PhiNode(region, TypeInstKlassPtr::OBJECT);

  // For type arrays, keep the klass as-is.
  generate_typeArray_guard(klass_node, region);
  if (region->req() == 3) {
    phi->add_req(klass_node);
  }
  // Fall-through path: the non-refined klass is loaded from the _super field.
  Node* super_adr = basic_plus_adr(top(), klass_node, in_bytes(Klass::super_offset()));
  Node* super_klass = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), super_adr, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT));

  region->init_req(1, control());
  phi->init_req(1, super_klass);

  set_control(_gvn.transform(region));
  return _gvn.transform(phi);
}
4966 
4967 //-----------------------inline_native_newArray--------------------------
4968 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4969 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4970 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4971   Node* mirror;
4972   Node* count_val;
4973   if (uninitialized) {
4974     null_check_receiver();
4975     mirror    = argument(1);
4976     count_val = argument(2);
4977   } else {
4978     mirror    = argument(0);
4979     count_val = argument(1);
4980   }
4981 
4982   mirror = null_check(mirror);
4983   // If mirror or obj is dead, only null-path is taken.
4984   if (stopped())  return true;
4985 
4986   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4987   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4988   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);

5006     CallJavaNode* slow_call = nullptr;
5007     if (uninitialized) {
5008       // Generate optimized virtual call (holder class 'Unsafe' is final)
5009       slow_call = generate_method_call(vmIntrinsics::_allocateUninitializedArray, false, false, true);
5010     } else {
5011       slow_call = generate_method_call_static(vmIntrinsics::_newArray, true);
5012     }
5013     Node* slow_result = set_results_for_java_call(slow_call);
5014     // this->control() comes from set_results_for_java_call
5015     result_reg->set_req(_slow_path, control());
5016     result_val->set_req(_slow_path, slow_result);
5017     result_io ->set_req(_slow_path, i_o());
5018     result_mem->set_req(_slow_path, reset_memory());
5019   }
5020 
5021   set_control(normal_ctl);
5022   if (!stopped()) {
5023     // Normal case:  The array type has been cached in the java.lang.Class.
5024     // The following call works fine even if the array type is polymorphic.
5025     // It could be a dynamic mix of int[], boolean[], Object[], etc.
5026 
5027     klass_node = load_default_refined_array_klass(klass_node);
5028 
5029     Node* obj = new_array(klass_node, count_val, 0);  // no arguments to push
5030     result_reg->init_req(_normal_path, control());
5031     result_val->init_req(_normal_path, obj);
5032     result_io ->init_req(_normal_path, i_o());
5033     result_mem->init_req(_normal_path, reset_memory());
5034 
5035     if (uninitialized) {
5036       // Mark the allocation so that zeroing is skipped
5037       AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(obj);
5038       alloc->maybe_set_complete(&_gvn);
5039     }
5040   }
5041 
5042   // Return the combined state.
5043   set_i_o(        _gvn.transform(result_io)  );
5044   set_all_memory( _gvn.transform(result_mem));
5045 
5046   C->set_has_split_ifs(true); // Has chance for split-if optimization
5047   set_result(result_reg, result_val);
5048   return true;

5097   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
5098   { PreserveReexecuteState preexecs(this);
5099     jvms()->set_should_reexecute(true);
5100 
5101     array_type_mirror = null_check(array_type_mirror);
5102     original          = null_check(original);
5103 
5104     // Check if a null path was taken unconditionally.
5105     if (stopped())  return true;
5106 
5107     Node* orig_length = load_array_length(original);
5108 
5109     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
5110     klass_node = null_check(klass_node);
5111 
5112     RegionNode* bailout = new RegionNode(1);
5113     record_for_igvn(bailout);
5114 
5115     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
5116     // Bail out if that is so.
5117     // Inline type array may have object field that would require a
5118     // write barrier. Conservatively, go to slow path.
5119     // TODO 8251971: Optimize for the case when flat src/dst are later found
5120     // to not contain oops (i.e., move this check to the macro expansion phase).
5121     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5122     const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
5123     const TypeKlassPtr* tklass = _gvn.type(klass_node)->is_klassptr();
5124     bool exclude_flat = UseArrayFlattening && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
5125                         // Can src array be flat and contain oops?
5126                         (orig_t == nullptr || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
5127                         // Can dest array be flat and contain oops?
5128                         tklass->can_be_inline_array() && (!tklass->is_flat() || tklass->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->as_inline_klass()->contains_oops());
5129     Node* not_objArray = exclude_flat ? generate_non_refArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
5130 
5131     Node* refined_klass_node = load_default_refined_array_klass(klass_node, /* type_array_guard= */ false);
5132 
5133     if (not_objArray != nullptr) {
5134       // Improve the klass node's type from the new optimistic assumption:
5135       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
5136       bool not_flat = !UseArrayFlattening;
5137       bool not_null_free = !Arguments::is_valhalla_enabled();
5138       const Type* akls = TypeAryKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0), Type::trust_interfaces, not_flat, not_null_free, false, false, not_flat, true);
5139       Node* cast = new CastPPNode(control(), refined_klass_node, akls);
5140       refined_klass_node = _gvn.transform(cast);
5141     }
5142 
5143     // Bail out if either start or end is negative.
5144     generate_negative_guard(start, bailout, &start);
5145     generate_negative_guard(end,   bailout, &end);
5146 
5147     Node* length = end;
5148     if (_gvn.type(start) != TypeInt::ZERO) {
5149       length = _gvn.transform(new SubINode(end, start));
5150     }
5151 
5152     // Bail out if length is negative (i.e., if start > end).
5153     // Without this the new_array would throw
5154     // NegativeArraySizeException but IllegalArgumentException is what
5155     // should be thrown
5156     generate_negative_guard(length, bailout, &length);
5157 
5158     // Handle inline type arrays
5159     bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
5160     if (!stopped()) {
5161       // TODO 8251971
5162       if (!orig_t->is_null_free()) {
5163         // Not statically known to be null free, add a check
5164         generate_fair_guard(null_free_array_test(original), bailout);
5165       }
5166       orig_t = _gvn.type(original)->isa_aryptr();
5167       if (orig_t != nullptr && orig_t->is_flat()) {
5168         // Src is flat, check that dest is flat as well
5169         if (exclude_flat) {
5170           // Dest can't be flat, bail out
5171           bailout->add_req(control());
5172           set_control(top());
5173         } else {
5174           generate_fair_guard(flat_array_test(refined_klass_node, /* flat = */ false), bailout);
5175         }
5176         // TODO 8350865 This is not correct anymore. Write tests and fix logic similar to arraycopy.
5177       } else if (UseArrayFlattening && (orig_t == nullptr || !orig_t->is_not_flat()) &&
5178                  // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
5179                  ((!tklass->is_flat() && tklass->can_be_inline_array()) || !can_validate)) {
5180         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
5181         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
5182         generate_fair_guard(flat_array_test(load_object_klass(original)), bailout);
5183         if (orig_t != nullptr) {
5184           orig_t = orig_t->cast_to_not_flat();
5185           original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
5186         }
5187       }
5188       if (!can_validate) {
5189         // No validation. The subtype check emitted at macro expansion time will not go to the slow
5190         // path but call checkcast_arraycopy which can not handle flat/null-free inline type arrays.
5191         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
5192         generate_fair_guard(flat_array_test(refined_klass_node), bailout);
5193         generate_fair_guard(null_free_array_test(original), bailout);
5194       }
5195     }
5196 
5197     // Bail out if start is larger than the original length
5198     Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
5199     generate_negative_guard(orig_tail, bailout, &orig_tail);
5200 
5201     if (bailout->req() > 1) {
5202       PreserveJVMState pjvms(this);
5203       set_control(_gvn.transform(bailout));
5204       uncommon_trap(Deoptimization::Reason_intrinsic,
5205                     Deoptimization::Action_maybe_recompile);
5206     }
5207 
5208     if (!stopped()) {
5209       // How many elements will we copy from the original?
5210       // The answer is MinI(orig_tail, length).
5211       Node* moved = _gvn.transform(new MinINode(orig_tail, length));
5212 
5213       // Generate a direct call to the right arraycopy function(s).
5214       // We know the copy is disjoint but we might not know if the
5215       // oop stores need checking.
5216       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).

5222       // to the copyOf to be validated, including that the copy to the
5223       // new array won't trigger an ArrayStoreException. That subtype
5224       // check can be optimized if we know something on the type of
5225       // the input array from type speculation.
5226       if (_gvn.type(klass_node)->singleton()) {
5227         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
5228         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
5229 
5230         int test = C->static_subtype_check(superk, subk);
5231         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
5232           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
5233           if (t_original->speculative_type() != nullptr) {
5234             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
5235           }
5236         }
5237       }
5238 
5239       bool validated = false;
5240       // Reason_class_check rather than Reason_intrinsic because we
5241       // want to intrinsify even if this traps.
5242       if (can_validate) {
5243         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
5244 
5245         if (not_subtype_ctrl != top()) {
5246           PreserveJVMState pjvms(this);
5247           set_control(not_subtype_ctrl);
5248           uncommon_trap(Deoptimization::Reason_class_check,
5249                         Deoptimization::Action_make_not_entrant);
5250           assert(stopped(), "Should be stopped");
5251         }
5252         validated = true;
5253       }
5254 
5255       if (!stopped()) {
5256         newcopy = new_array(refined_klass_node, length, 0);  // no arguments to push
5257 
5258         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, true,
5259                                                 load_object_klass(original), klass_node);
5260         if (!is_copyOfRange) {
5261           ac->set_copyof(validated);
5262         } else {
5263           ac->set_copyofrange(validated);
5264         }
5265         Node* n = _gvn.transform(ac);
5266         if (n == ac) {
5267           ac->connect_outputs(this);
5268         } else {
5269           assert(validated, "shouldn't transform if all arguments not validated");
5270           set_all_memory(n);
5271         }
5272       }
5273     }
5274   } // original reexecute is set back here
5275 
5276   C->set_has_split_ifs(true); // Has chance for split-if optimization

5308 
5309 //-----------------------generate_method_call----------------------------
5310 // Use generate_method_call to make a slow-call to the real
5311 // method if the fast path fails.  An alternative would be to
5312 // use a stub like OptoRuntime::slow_arraycopy_Java.
5313 // This only works for expanding the current library call,
5314 // not another intrinsic.  (E.g., don't use this for making an
5315 // arraycopy call inside of the copyOf intrinsic.)
5316 CallJavaNode*
5317 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
5318   // When compiling the intrinsic method itself, do not use this technique.
5319   guarantee(callee() != C->method(), "cannot make slow-call to self");
5320 
5321   ciMethod* method = callee();
5322   // ensure the JVMS we have will be correct for this call
5323   guarantee(method_id == method->intrinsic_id(), "must match");
5324 
5325   const TypeFunc* tf = TypeFunc::make(method);
5326   if (res_not_null) {
5327     assert(tf->return_type() == T_OBJECT, "");
5328     const TypeTuple* range = tf->range_cc();
5329     const Type** fields = TypeTuple::fields(range->cnt());
5330     fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
5331     const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
5332     tf = TypeFunc::make(tf->domain_cc(), new_range);
5333   }
5334   CallJavaNode* slow_call;
5335   if (is_static) {
5336     assert(!is_virtual, "");
5337     slow_call = new CallStaticJavaNode(C, tf,
5338                            SharedRuntime::get_resolve_static_call_stub(), method);
5339   } else if (is_virtual) {
5340     assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
5341     int vtable_index = Method::invalid_vtable_index;
5342     if (UseInlineCaches) {
5343       // Suppress the vtable call
5344     } else {
5345       // hashCode and clone are not a miranda methods,
5346       // so the vtable index is fixed.
5347       // No need to use the linkResolver to get it.
5348        vtable_index = method->vtable_index();
5349        assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
5350               "bad index %d", vtable_index);
5351     }
5352     slow_call = new CallDynamicJavaNode(tf,

5369   set_edges_for_java_call(slow_call);
5370   return slow_call;
5371 }
5372 
5373 
5374 /**
5375  * Build special case code for calls to hashCode on an object. This call may
5376  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
5377  * slightly different code.
5378  */
5379 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
5380   assert(is_static == callee()->is_static(), "correct intrinsic selection");
5381   assert(!(is_virtual && is_static), "either virtual, special, or static");
5382 
5383   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
5384 
5385   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5386   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
5387   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
5388   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5389   Node* obj = argument(0);
5390 
5391   // Don't intrinsify hashcode on inline types for now.
5392   // The "is locked" runtime check also subsumes the inline type check (as inline types cannot be locked) and goes to the slow path.
5393   if (gvn().type(obj)->is_inlinetypeptr()) {
5394     return false;
5395   }
5396 
5397   if (!is_static) {
5398     // Check for hashing null object
5399     obj = null_check_receiver();
5400     if (stopped())  return true;        // unconditionally null
5401     result_reg->init_req(_null_path, top());
5402     result_val->init_req(_null_path, top());
5403   } else {
5404     // Do a null check, and return zero if null.
5405     // System.identityHashCode(null) == 0

5406     Node* null_ctl = top();
5407     obj = null_check_oop(obj, &null_ctl);
5408     result_reg->init_req(_null_path, null_ctl);
5409     result_val->init_req(_null_path, _gvn.intcon(0));
5410   }
5411 
5412   // Unconditionally null?  Then return right away.
5413   if (stopped()) {
5414     set_control( result_reg->in(_null_path));
5415     if (!stopped())
5416       set_result(result_val->in(_null_path));
5417     return true;
5418   }
5419 
5420   // We only go to the fast case code if we pass a number of guards.  The
5421   // paths which do not pass are accumulated in the slow_region.
5422   RegionNode* slow_region = new RegionNode(1);
5423   record_for_igvn(slow_region);
5424 
5425   // If this is a virtual call, we generate a funny guard.  We pull out
5426   // the vtable entry corresponding to hashCode() from the target object.
5427   // If the target method which we are calling happens to be the native
5428   // Object hashCode() method, we pass the guard.  We do not need this
5429   // guard for non-virtual calls -- the caller is known to be the native
5430   // Object hashCode().
5431   if (is_virtual) {
5432     // After null check, get the object's klass.
5433     Node* obj_klass = load_object_klass(obj);
5434     generate_virtual_guard(obj_klass, slow_region);
5435   }
5436 
5437   // Get the header out of the object, use LoadMarkNode when available
5438   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
5439   // The control of the load must be null. Otherwise, the load can move before
5440   // the null check after castPP removal.
5441   Node* no_ctrl = nullptr;
5442   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
5443 
5444   if (!UseObjectMonitorTable) {
5445     // Test the header to see if it is safe to read w.r.t. locking.
5446     // We cannot use the inline type mask as this may check bits that are overriden
5447     // by an object monitor's pointer when inflating locking.
5448     Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);
5449     Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
5450     Node *monitor_val   = _gvn.MakeConX(markWord::monitor_value);
5451     Node *chk_monitor   = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
5452     Node *test_monitor  = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
5453 
5454     generate_slow_guard(test_monitor, slow_region);
5455   }
5456 
5457   // Get the hash value and check to see that it has been properly assigned.
5458   // We depend on hash_mask being at most 32 bits and avoid the use of
5459   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
5460   // vm: see markWord.hpp.
5461   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
5462   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
5463   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
5464   // This hack lets the hash bits live anywhere in the mark object now, as long
5465   // as the shift drops the relevant bits into the low 32 bits.  Note that
5466   // Java spec says that HashCode is an int so there's no point in capturing
5467   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).

5495     // this->control() comes from set_results_for_java_call
5496     result_reg->init_req(_slow_path, control());
5497     result_val->init_req(_slow_path, slow_result);
5498     result_io  ->set_req(_slow_path, i_o());
5499     result_mem ->set_req(_slow_path, reset_memory());
5500   }
5501 
5502   // Return the combined state.
5503   set_i_o(        _gvn.transform(result_io)  );
5504   set_all_memory( _gvn.transform(result_mem));
5505 
5506   set_result(result_reg, result_val);
5507   return true;
5508 }
5509 
5510 //---------------------------inline_native_getClass----------------------------
5511 // public final native Class<?> java.lang.Object.getClass();
5512 //
5513 // Build special case code for calls to getClass on an object.
5514 bool LibraryCallKit::inline_native_getClass() {
5515   Node* obj = argument(0);
5516   if (obj->is_InlineType()) {
5517     const Type* t = _gvn.type(obj);
5518     if (t->maybe_null()) {
5519       null_check(obj);
5520     }
5521     set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
5522     return true;
5523   }
5524   obj = null_check_receiver();
5525   if (stopped())  return true;
5526   set_result(load_mirror_from_klass(load_object_klass(obj)));
5527   return true;
5528 }
5529 
5530 //-----------------inline_native_Reflection_getCallerClass---------------------
5531 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
5532 //
5533 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
5534 //
5535 // NOTE: This code must perform the same logic as JVM_GetCallerClass
5536 // in that it must skip particular security frames and checks for
5537 // caller sensitive methods.
5538 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
5539 #ifndef PRODUCT
5540   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
5541     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
5542   }
5543 #endif
5544 

5926 //  not cloneable or finalizer => slow path to out-of-line Object.clone
5927 //
5928 // The general case has two steps, allocation and copying.
5929 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5930 //
5931 // Copying also has two cases, oop arrays and everything else.
5932 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5933 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5934 //
5935 // These steps fold up nicely if and when the cloned object's klass
5936 // can be sharply typed as an object array, a type array, or an instance.
5937 //
5938 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5939   PhiNode* result_val;
5940 
5941   // Set the reexecute bit for the interpreter to reexecute
5942   // the bytecode that invokes Object.clone if deoptimization happens.
5943   { PreserveReexecuteState preexecs(this);
5944     jvms()->set_should_reexecute(true);
5945 
5946     Node* obj = argument(0);
5947     obj = null_check_receiver();
5948     if (stopped())  return true;
5949 
5950     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5951     if (obj_type->is_inlinetypeptr()) {
5952       // If the object to clone is an inline type, we can simply return it (i.e. a nop) since inline types have
5953       // no identity.
5954       set_result(obj);
5955       return true;
5956     }
5957 
5958     // If we are going to clone an instance, we need its exact type to
5959     // know the number and types of fields to convert the clone to
5960     // loads/stores. Maybe a speculative type can help us.
5961     if (!obj_type->klass_is_exact() &&
5962         obj_type->speculative_type() != nullptr &&
5963         obj_type->speculative_type()->is_instance_klass() &&
5964         !obj_type->speculative_type()->is_inlinetype()) {
5965       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5966       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5967           !spec_ik->has_injected_fields()) {
5968         if (!obj_type->isa_instptr() ||
5969             obj_type->is_instptr()->instance_klass()->has_subklass()) {
5970           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5971         }
5972       }
5973     }
5974 
5975     // Conservatively insert a memory barrier on all memory slices.
5976     // Do not let writes into the original float below the clone.
5977     insert_mem_bar(Op_MemBarCPUOrder);
5978 
5979     // paths into result_reg:
5980     enum {
5981       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
5982       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
5983       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
5984       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
5985       PATH_LIMIT
5986     };
5987     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5988     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5989     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
5990     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5991     record_for_igvn(result_reg);
5992 
5993     Node* obj_klass = load_object_klass(obj);
5994     // We only go to the fast case code if we pass a number of guards.
5995     // The paths which do not pass are accumulated in the slow_region.
5996     RegionNode* slow_region = new RegionNode(1);
5997     record_for_igvn(slow_region);
5998 
5999     Node* array_obj = obj;
6000     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr, &array_obj);
6001     if (array_ctl != nullptr) {
6002       // It's an array.
6003       PreserveJVMState pjvms(this);
6004       set_control(array_ctl);



6005 
6006       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
6007       const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
6008       if (UseArrayFlattening && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
6009           obj_type->can_be_inline_array() &&
6010           (ary_ptr == nullptr || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
6011         // Flat inline type array may have object field that would require a
6012         // write barrier. Conservatively, go to slow path.
6013         generate_fair_guard(flat_array_test(obj_klass), slow_region);













6014       }







6015 
6016       if (!stopped()) {
6017         Node* obj_length = load_array_length(array_obj);
6018         Node* array_size = nullptr; // Size of the array without object alignment padding.
6019         Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
6020 
6021         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
6022         if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
6023           // If it is an oop array, it requires very special treatment,
6024           // because gc barriers are required when accessing the array.
6025           Node* is_obja = generate_refArray_guard(obj_klass, (RegionNode*)nullptr);
6026           if (is_obja != nullptr) {
6027             PreserveJVMState pjvms2(this);
6028             set_control(is_obja);
6029             // Generate a direct call to the right arraycopy function(s).
6030             // Clones are always tightly coupled.
6031             ArrayCopyNode* ac = ArrayCopyNode::make(this, true, array_obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
6032             ac->set_clone_oop_array();
6033             Node* n = _gvn.transform(ac);
6034             assert(n == ac, "cannot disappear");
6035             ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
6036 
6037             result_reg->init_req(_objArray_path, control());
6038             result_val->init_req(_objArray_path, alloc_obj);
6039             result_i_o ->set_req(_objArray_path, i_o());
6040             result_mem ->set_req(_objArray_path, reset_memory());
6041           }
6042         }
6043         // Otherwise, there are no barriers to worry about.
6044         // (We can dispense with card marks if we know the allocation
6045         //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
6046         //  causes the non-eden paths to take compensating steps to
6047         //  simulate a fresh allocation, so that no further
6048         //  card marks are required in compiled code to initialize
6049         //  the object.)
6050 
6051         if (!stopped()) {
6052           copy_to_clone(obj, alloc_obj, array_size, true);
6053 
6054           // Present the results of the copy.
6055           result_reg->init_req(_array_path, control());
6056           result_val->init_req(_array_path, alloc_obj);
6057           result_i_o ->set_req(_array_path, i_o());
6058           result_mem ->set_req(_array_path, reset_memory());
6059         }
6060       }
6061     }
6062 




6063     if (!stopped()) {
6064       // It's an instance (we did array above).  Make the slow-path tests.
6065       // If this is a virtual call, we generate a funny guard.  We grab
6066       // the vtable entry corresponding to clone() from the target object.
6067       // If the target method which we are calling happens to be the
6068       // Object clone() method, we pass the guard.  We do not need this
6069       // guard for non-virtual calls; the caller is known to be the native
6070       // Object clone().
6071       if (is_virtual) {
6072         generate_virtual_guard(obj_klass, slow_region);
6073       }
6074 
6075       // The object must be easily cloneable and must not have a finalizer.
6076       // Both of these conditions may be checked in a single test.
6077       // We could optimize the test further, but we don't care.
6078       generate_misc_flags_guard(obj_klass,
6079                                 // Test both conditions:
6080                                 KlassFlags::_misc_is_cloneable_fast | KlassFlags::_misc_has_finalizer,
6081                                 // Must be cloneable but not finalizer:
6082                                 KlassFlags::_misc_is_cloneable_fast,

6174         set_jvms(sfpt->jvms());
6175         _reexecute_sp = jvms()->sp();
6176 
6177         return saved_jvms;
6178       }
6179     }
6180   }
6181   return nullptr;
6182 }
6183 
// Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
// such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
  // Shallow-clone the allocation's JVMState and build a safepoint that
  // mirrors all of the allocation's inputs, so deoptimization at this
  // point restarts execution at the allocating bytecode.
  JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
  uint size = alloc->req();
  SafePointNode* sfpt = new SafePointNode(size, old_jvms);
  old_jvms->set_map(sfpt);
  for (uint i = 0; i < size; i++) {
    sfpt->init_req(i, alloc->in(i));
  }
  // 'adjustment' counts how many values are pushed onto the re-executed
  // expression stack; it starts at 1 for the array length pushed below.
  int adjustment = 1;
  const TypeAryKlassPtr* ary_klass_ptr = alloc->in(AllocateNode::KlassNode)->bottom_type()->is_aryklassptr();
  if (ary_klass_ptr->is_null_free()) {
    // A null-free, tightly coupled array allocation can only come from LibraryCallKit::inline_newArray which
    // also requires the componentType and initVal on stack for re-execution.
    // Re-create and push the componentType.
    ciArrayKlass* klass = ary_klass_ptr->exact_klass()->as_array_klass();
    ciInstance* instance = klass->component_mirror_instance();
    const TypeInstPtr* t_instance = TypeInstPtr::make(instance);
    sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), makecon(t_instance));
    adjustment++;
  }
  // re-push array length for deoptimization
  sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment - 1, alloc->in(AllocateNode::ALength));
  if (ary_klass_ptr->is_null_free()) {
    // Re-create and push the initVal.
    Node* init_val = alloc->in(AllocateNode::InitValue);
    if (init_val == nullptr) {
      // No explicit init value recorded: re-materialize the all-zero
      // instance of the element's inline klass as the default.
      init_val = InlineTypeNode::make_all_zero(_gvn, ary_klass_ptr->elem()->is_instklassptr()->instance_klass()->as_inline_klass());
    } else if (UseCompressedOops) {
      // The recorded init value is a narrow oop; decode it so the
      // interpreter sees a full-width reference on the stack.
      init_val = _gvn.transform(new DecodeNNode(init_val, init_val->bottom_type()->make_ptr()));
    }
    sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment, init_val);
    adjustment++;
  }
  // Grow the stack by the number of pushed values and shift the monitor,
  // scalarized-object and end offsets past the enlarged expression stack.
  old_jvms->set_sp(old_jvms->sp() + adjustment);
  old_jvms->set_monoff(old_jvms->monoff() + adjustment);
  old_jvms->set_scloff(old_jvms->scloff() + adjustment);
  old_jvms->set_endoff(old_jvms->endoff() + adjustment);
  old_jvms->set_should_reexecute(true);

  // Give the safepoint the current I/O, memory and control state so it is
  // a valid deoptimization point at this position in the graph.
  sfpt->set_i_o(map()->i_o());
  sfpt->set_memory(map()->memory());
  sfpt->set_control(map()->control());
  return sfpt;
}
6230 
6231 // In case of a deoptimization, we restart execution at the
6232 // allocation, allocating a new array. We would leave an uninitialized
6233 // array in the heap that GCs wouldn't expect. Move the allocation
6234 // after the traps so we don't allocate the array if we
6235 // deoptimize. This is possible because tightly_coupled_allocation()
6236 // guarantees there's no observer of the allocated array at this point
6237 // and the control flow is simple enough.
6238 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
6239                                                     int saved_reexecute_sp, uint new_idx) {
6240   if (saved_jvms_before_guards != nullptr && !stopped()) {
6241     replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
6242 
6243     assert(alloc != nullptr, "only with a tightly coupled allocation");
6244     // restore JVM state to the state at the arraycopy
6245     saved_jvms_before_guards->map()->set_control(map()->control());
6246     assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
6247     assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
6248     // If we've improved the types of some nodes (null check) while
6249     // emitting the guards, propagate them to the current state
6250     map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
6251     set_jvms(saved_jvms_before_guards);
6252     _reexecute_sp = saved_reexecute_sp;
6253 
6254     // Remove the allocation from above the guards
6255     CallProjections* callprojs = alloc->extract_projections(true);

6256     InitializeNode* init = alloc->initialization();
6257     Node* alloc_mem = alloc->in(TypeFunc::Memory);
6258     C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
6259     init->replace_mem_projs_by(alloc_mem, C);
6260 
6261     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
6262     // the allocation (i.e. is only valid if the allocation succeeds):
6263     // 1) replace CastIINode with AllocateArrayNode's length here
6264     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
6265     //
6266     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
6267     // new separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy)
6268     Node* init_control = init->proj_out(TypeFunc::Control);
6269     Node* alloc_length = alloc->Ideal_length();
6270 #ifdef ASSERT
6271     Node* prev_cast = nullptr;
6272 #endif
6273     for (uint i = 0; i < init_control->outcnt(); i++) {
6274       Node* init_out = init_control->raw_out(i);
6275       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
6276 #ifdef ASSERT
6277         if (prev_cast == nullptr) {
6278           prev_cast = init_out;

6280           if (prev_cast->cmp(*init_out) == false) {
6281             prev_cast->dump();
6282             init_out->dump();
6283             assert(false, "not equal CastIINode");
6284           }
6285         }
6286 #endif
6287         C->gvn_replace_by(init_out, alloc_length);
6288       }
6289     }
6290     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
6291 
6292     // move the allocation here (after the guards)
6293     _gvn.hash_delete(alloc);
6294     alloc->set_req(TypeFunc::Control, control());
6295     alloc->set_req(TypeFunc::I_O, i_o());
6296     Node *mem = reset_memory();
6297     set_all_memory(mem);
6298     alloc->set_req(TypeFunc::Memory, mem);
6299     set_control(init->proj_out_or_null(TypeFunc::Control));
6300     set_i_o(callprojs->fallthrough_ioproj);
6301 
6302     // Update memory as done in GraphKit::set_output_for_allocation()
6303     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
6304     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
6305     if (ary_type->isa_aryptr() && length_type != nullptr) {
6306       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
6307     }
6308     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
6309     int            elemidx  = C->get_alias_index(telemref);
6310     // Need to properly move every memory projection for the Initialize
6311 #ifdef ASSERT
6312     int mark_idx = C->get_alias_index(ary_type->add_offset(oopDesc::mark_offset_in_bytes()));
6313     int klass_idx = C->get_alias_index(ary_type->add_offset(oopDesc::klass_offset_in_bytes()));
6314 #endif
6315     auto move_proj = [&](ProjNode* proj) {
6316       int alias_idx = C->get_alias_index(proj->adr_type());
6317       assert(alias_idx == Compile::AliasIdxRaw ||
6318              alias_idx == elemidx ||
6319              alias_idx == mark_idx ||
6320              alias_idx == klass_idx, "should be raw memory or array element type");

6630         top_src  = src_type->isa_aryptr();
6631         has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
6632         src_spec = true;
6633       }
6634       if (!has_dest) {
6635         dest = maybe_cast_profiled_obj(dest, dest_k, true);
6636         dest_type  = _gvn.type(dest);
6637         top_dest  = dest_type->isa_aryptr();
6638         has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
6639         dest_spec = true;
6640       }
6641     }
6642   }
6643 
6644   if (has_src && has_dest && can_emit_guards) {
6645     BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
6646     BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
6647     if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
6648     if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
6649 
6650     if (src_elem == dest_elem && top_src->is_flat() == top_dest->is_flat() && src_elem == T_OBJECT) {
6651       // If both arrays are object arrays then having the exact types
6652       // for both will remove the need for a subtype check at runtime
6653       // before the call and may make it possible to pick a faster copy
6654       // routine (without a subtype check on every element)
6655       // Do we have the exact type of src?
6656       bool could_have_src = src_spec;
6657       // Do we have the exact type of dest?
6658       bool could_have_dest = dest_spec;
6659       ciKlass* src_k = nullptr;
6660       ciKlass* dest_k = nullptr;
6661       if (!src_spec) {
6662         src_k = src_type->speculative_type_not_null();
6663         if (src_k != nullptr && src_k->is_array_klass()) {
6664           could_have_src = true;
6665         }
6666       }
6667       if (!dest_spec) {
6668         dest_k = dest_type->speculative_type_not_null();
6669         if (dest_k != nullptr && dest_k->is_array_klass()) {
6670           could_have_dest = true;
6671         }
6672       }
6673       if (could_have_src && could_have_dest) {
6674         // If we can have both exact types, emit the missing guards
6675         if (could_have_src && !src_spec) {
6676           src = maybe_cast_profiled_obj(src, src_k, true);
6677           src_type = _gvn.type(src);
6678           top_src = src_type->isa_aryptr();
6679         }
6680         if (could_have_dest && !dest_spec) {
6681           dest = maybe_cast_profiled_obj(dest, dest_k, true);
6682           dest_type = _gvn.type(dest);
6683           top_dest = dest_type->isa_aryptr();
6684         }
6685       }
6686     }
6687   }
6688 
6689   ciMethod* trap_method = method();
6690   int trap_bci = bci();
6691   if (saved_jvms_before_guards != nullptr) {
6692     trap_method = alloc->jvms()->method();
6693     trap_bci = alloc->jvms()->bci();
6694   }
6695 
6696   bool negative_length_guard_generated = false;
6697 
6698   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
6699       can_emit_guards && !src->is_top() && !dest->is_top()) {

6700     // validate arguments: enables transformation of the ArrayCopyNode
6701     validated = true;
6702 
6703     RegionNode* slow_region = new RegionNode(1);
6704     record_for_igvn(slow_region);
6705 
6706     // (1) src and dest are arrays.
6707     generate_non_array_guard(load_object_klass(src), slow_region, &src);
6708     generate_non_array_guard(load_object_klass(dest), slow_region, &dest);
6709 
6710     // (2) src and dest arrays must have elements of the same BasicType
6711     // done at macro expansion or at Ideal transformation time
6712 
6713     // (4) src_offset must not be negative.
6714     generate_negative_guard(src_offset, slow_region);
6715 
6716     // (5) dest_offset must not be negative.
6717     generate_negative_guard(dest_offset, slow_region);
6718 
6719     // (7) src_offset + length must not exceed length of src.
6720     generate_limit_guard(src_offset, length,
6721                          load_array_length(src),
6722                          slow_region);
6723 
6724     // (8) dest_offset + length must not exceed length of dest.
6725     generate_limit_guard(dest_offset, length,
6726                          load_array_length(dest),
6727                          slow_region);
6728 
6729     // (6) length must not be negative.
6730     // This is also checked in generate_arraycopy() during macro expansion, but
6731     // we also have to check it here for the case where the ArrayCopyNode will
6732     // be eliminated by Escape Analysis.
6733     if (EliminateAllocations) {
6734       generate_negative_guard(length, slow_region);
6735       negative_length_guard_generated = true;
6736     }
6737 
6738     // (9) each element of an oop array must be assignable
6739     Node* dest_klass = load_object_klass(dest);
6740     Node* refined_dest_klass = dest_klass;
6741     if (src != dest) {
6742       dest_klass = load_non_refined_array_klass(refined_dest_klass);
6743       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
6744       slow_region->add_req(not_subtype_ctrl);







6745     }
6746 
6747     // TODO 8350865 Improve this. What about atomicity? Make sure this is always folded for type arrays.
6748     // If destination is null-restricted, source must be null-restricted as well: src_null_restricted || !dst_null_restricted
6749     Node* src_klass = load_object_klass(src);
6750     Node* adr_prop_src = basic_plus_adr(top(), src_klass, in_bytes(ArrayKlass::properties_offset()));
6751     Node* prop_src = _gvn.transform(LoadNode::make(_gvn, control(), immutable_memory(), adr_prop_src, TypeRawPtr::BOTTOM, TypeInt::INT, T_INT, MemNode::unordered));
6752     Node* adr_prop_dest = basic_plus_adr(top(), refined_dest_klass, in_bytes(ArrayKlass::properties_offset()));
6753     Node* prop_dest = _gvn.transform(LoadNode::make(_gvn, control(), immutable_memory(), adr_prop_dest, TypeRawPtr::BOTTOM, TypeInt::INT, T_INT, MemNode::unordered));
6754 
6755     const ArrayProperties props_null_restricted = ArrayProperties::Default().with_null_restricted();
6756     jint props_value = (jint)props_null_restricted.value();
6757 
6758     prop_dest = _gvn.transform(new XorINode(prop_dest, intcon(props_value)));
6759     prop_src = _gvn.transform(new OrINode(prop_dest, prop_src));
6760     prop_src = _gvn.transform(new AndINode(prop_src, intcon(props_value)));
6761 
6762     Node* chk = _gvn.transform(new CmpINode(prop_src, intcon(props_value)));
6763     Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::ne));
6764     generate_fair_guard(tst, slow_region);
6765 
6766     // TODO 8350865 This is too strong
6767     generate_fair_guard(flat_array_test(src), slow_region);
6768     generate_fair_guard(flat_array_test(dest), slow_region);
6769 
6770     {
6771       PreserveJVMState pjvms(this);
6772       set_control(_gvn.transform(slow_region));
6773       uncommon_trap(Deoptimization::Reason_intrinsic,
6774                     Deoptimization::Action_make_not_entrant);
6775       assert(stopped(), "Should be stopped");
6776     }
6777 
6778     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->isa_klassptr();
6779     if (dest_klass_t == nullptr) {
6780       // refined_dest_klass may not be an array, which leads to dest_klass being top. This means we
6781       // are in a dead path.
6782       uncommon_trap(Deoptimization::Reason_intrinsic,
6783                     Deoptimization::Action_make_not_entrant);
6784       return true;
6785     }
6786 
6787     const Type* toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
6788     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
6789     arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
6790   }
6791 
6792   if (stopped()) {
6793     return true;
6794   }
6795 
6796   Node* dest_klass = load_object_klass(dest);
6797   dest_klass = load_non_refined_array_klass(dest_klass);
6798 
6799   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6800                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
6801                                           // so the compiler has a chance to eliminate them: during macro expansion,
6802                                           // we have to set their control (CastPP nodes are eliminated).
6803                                           load_object_klass(src), dest_klass,
6804                                           load_array_length(src), load_array_length(dest));
6805 
6806   ac->set_arraycopy(validated);
6807 
6808   Node* n = _gvn.transform(ac);
6809   if (n == ac) {
6810     ac->connect_outputs(this);
6811   } else {
6812     assert(validated, "shouldn't transform if all arguments not validated");
6813     set_all_memory(n);
6814   }
6815   clear_upper_avx();
6816 
6817 
6818   return true;
6819 }
6820 
6821 
6822 // Helper function which determines if an arraycopy immediately follows
6823 // an allocation, with no intervening tests or other escapes for the object.
< prev index next >