src/hotspot/share/opto/library_call.cpp

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"

  27 #include "ci/ciUtilities.inline.hpp"
  28 #include "classfile/vmIntrinsics.hpp"
  29 #include "compiler/compileBroker.hpp"
  30 #include "compiler/compileLog.hpp"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "jfr/support/jfrIntrinsics.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/klass.inline.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/c2compiler.hpp"
  39 #include "opto/castnode.hpp"
  40 #include "opto/cfgnode.hpp"
  41 #include "opto/convertnode.hpp"
  42 #include "opto/countbitsnode.hpp"
  43 #include "opto/idealKit.hpp"
  44 #include "opto/library_call.hpp"
  45 #include "opto/mathexactnode.hpp"
  46 #include "opto/mulnode.hpp"

 302   case vmIntrinsics::_indexOfUL:                return inline_string_indexOf(StrIntrinsicNode::UL);
 303   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 304   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 305   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 306   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 307   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 308 
 309   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 310   case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);
 311 
 312   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 313   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 314   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 315   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 316 
 317   case vmIntrinsics::_compressStringC:
 318   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 319   case vmIntrinsics::_inflateStringC:
 320   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 321 


 322   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 323   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 324   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 325   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 326   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 327   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 328   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 329   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 330   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);

 331 
 332   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 333   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 334   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 335   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 336   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 337   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 338   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 339   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 340   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);

 341 
 342   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 343   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 344   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 345   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 346   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 347   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 348   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 349   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 350   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 351 
 352   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 353   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 354   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 355   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 356   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 357   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 358   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 359   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 360   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 490   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 491   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 492   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 493   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 494   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 495 
 496   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 497   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 498 
 499   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 500 
 501   case vmIntrinsics::_isInstance:
 502   case vmIntrinsics::_getModifiers:
 503   case vmIntrinsics::_isInterface:
 504   case vmIntrinsics::_isArray:
 505   case vmIntrinsics::_isPrimitive:
 506   case vmIntrinsics::_isHidden:
 507   case vmIntrinsics::_getSuperclass:
 508   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 509 
 510   case vmIntrinsics::_floatToRawIntBits:
 511   case vmIntrinsics::_floatToIntBits:
 512   case vmIntrinsics::_intBitsToFloat:
 513   case vmIntrinsics::_doubleToRawLongBits:
 514   case vmIntrinsics::_doubleToLongBits:
 515   case vmIntrinsics::_longBitsToDouble:         return inline_fp_conversions(intrinsic_id());
 516 
 517   case vmIntrinsics::_numberOfLeadingZeros_i:
 518   case vmIntrinsics::_numberOfLeadingZeros_l:
 519   case vmIntrinsics::_numberOfTrailingZeros_i:
 520   case vmIntrinsics::_numberOfTrailingZeros_l:
 521   case vmIntrinsics::_bitCount_i:
 522   case vmIntrinsics::_bitCount_l:
 523   case vmIntrinsics::_reverseBytes_i:
 524   case vmIntrinsics::_reverseBytes_l:
 525   case vmIntrinsics::_reverseBytes_s:
 526   case vmIntrinsics::_reverseBytes_c:           return inline_number_methods(intrinsic_id());
 527 
 528   case vmIntrinsics::_getCallerClass:           return inline_native_Reflection_getCallerClass();
 529 

2168   case vmIntrinsics::_numberOfLeadingZeros_l:   n = new CountLeadingZerosLNode( arg);  break;
2169   case vmIntrinsics::_numberOfTrailingZeros_i:  n = new CountTrailingZerosINode(arg);  break;
2170   case vmIntrinsics::_numberOfTrailingZeros_l:  n = new CountTrailingZerosLNode(arg);  break;
2171   case vmIntrinsics::_bitCount_i:               n = new PopCountINode(          arg);  break;
2172   case vmIntrinsics::_bitCount_l:               n = new PopCountLNode(          arg);  break;
2173   case vmIntrinsics::_reverseBytes_c:           n = new ReverseBytesUSNode(0,   arg);  break;
2174   case vmIntrinsics::_reverseBytes_s:           n = new ReverseBytesSNode( 0,   arg);  break;
2175   case vmIntrinsics::_reverseBytes_i:           n = new ReverseBytesINode( 0,   arg);  break;
2176   case vmIntrinsics::_reverseBytes_l:           n = new ReverseBytesLNode( 0,   arg);  break;
2177   default:  fatal_unexpected_iid(id);  break;
2178   }
2179   set_result(_gvn.transform(n));
2180   return true;
2181 }
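
A minimal, self-contained reference sketch (hypothetical helper names, not part of this file) for two of the intrinsics dispatched above, handy when reading the matching ideal nodes: popcount for _bitCount_l and byte reversal for _reverseBytes_c.

#include <cstdint>

// Reference semantics for _bitCount_l (PopCountLNode): count the set bits.
static int ref_bit_count(uint64_t x) {
  int n = 0;
  while (x != 0) { x &= x - 1; ++n; }  // clears the lowest set bit each iteration
  return n;
}

// Reference semantics for _reverseBytes_c (ReverseBytesUSNode): swap the two
// bytes of an unsigned 16-bit char value.
static uint16_t ref_reverse_bytes_u2(uint16_t x) {
  return (uint16_t)((x << 8) | (x >> 8));
}
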
2182 
2183 //----------------------------inline_unsafe_access----------------------------
2184 
2185 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2186   // Attempt to infer a sharper value type from the offset and base type.
2187   ciKlass* sharpened_klass = NULL;

2188 
2189   // See if it is an instance field, with an object type.
2190   if (alias_type->field() != NULL) {
2191     if (alias_type->field()->type()->is_klass()) {
2192       sharpened_klass = alias_type->field()->type()->as_klass();

2193     }
2194   }
2195 
2196   // See if it is a narrow oop array.
2197   if (adr_type->isa_aryptr()) {
2198     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2199       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();

2200       if (elem_type != NULL) {
2201         sharpened_klass = elem_type->klass();
2202       }
2203     }
2204   }
2205 
2206   // The sharpened class might be unloaded if there is no class loader
2207   // constraint in place.
2208   if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2209     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2210 
2211 #ifndef PRODUCT
2212     if (C->print_intrinsics() || C->print_inlining()) {
2213       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2214       tty->print("  sharpened value: ");  tjp->dump();      tty->cr();
2215     }
2216 #endif
2217     // Sharpen the value type.
2218     return tjp;
2219   }
2220   return NULL;
2221 }
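
A small sketch of the preference order sharpen_unsafe_type() implements, with a hypothetical plain type standing in for the ci* classes: a declared instance-field type wins over an array element type, and either is used only when the candidate class is loaded.

// Hypothetical model; KlassInfo is an opaque stand-in, not HotSpot's Klass/ciKlass.
struct KlassInfo { bool loaded; };

static const KlassInfo* pick_sharpened(const KlassInfo* field_type,
                                       const KlassInfo* array_elem_type) {
  const KlassInfo* k = (field_type != nullptr) ? field_type : array_elem_type;
  // Keep the conservative type unless the candidate class is known to be loaded.
  return (k != nullptr && k->loaded) ? k : nullptr;
}
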
2222 
2223 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2224   switch (kind) {
2225       case Relaxed:
2226         return MO_UNORDERED;
2227       case Opaque:
2228         return MO_RELAXED;
2229       case Acquire:

2245   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2246   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2247 
2248   if (is_reference_type(type)) {
2249     decorators |= ON_UNKNOWN_OOP_REF;
2250   }
2251 
2252   if (unaligned) {
2253     decorators |= C2_UNALIGNED;
2254   }
2255 
2256 #ifndef PRODUCT
2257   {
2258     ResourceMark rm;
2259     // Check the signatures.
2260     ciSignature* sig = callee()->signature();
2261 #ifdef ASSERT
2262     if (!is_store) {
2263       // Object getReference(Object base, int/long offset), etc.
2264       BasicType rtype = sig->return_type()->basic_type();
2265       assert(rtype == type, "getter must return the expected value");
2266       assert(sig->count() == 2, "oop getter has 2 arguments");
2267       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2268       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2269     } else {
2270       // void putReference(Object base, int/long offset, Object x), etc.
2271       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2272       assert(sig->count() == 3, "oop putter has 3 arguments");
2273       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2274       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2275       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2276       assert(vtype == type, "putter must accept the expected value");
2277     }
2278 #endif // ASSERT
2279  }
2280 #endif //PRODUCT
2281 
2282   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2283 
2284   Node* receiver = argument(0);  // type: oop
2285 
2286   // Build address expression.
2287   Node* heap_base_oop = top();
2288 
2289   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2290   Node* base = argument(1);  // type: oop
2291   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2292   Node* offset = argument(2);  // type: long
2293   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2294   // to be plain byte offsets, which are also the same as those accepted
2295   // by oopDesc::field_addr.
2296   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2297          "fieldOffset must be byte-scaled");
2298   // 32-bit machines ignore the high half!
2299   offset = ConvL2X(offset);
2300 
2301   // Save state and restore on bailout
2302   uint old_sp = sp();
2303   SafePointNode* old_map = clone_map();
2304 
2305   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2306 
2307   if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2308     if (type != T_OBJECT) {
2309       decorators |= IN_NATIVE; // off-heap primitive access
2310     } else {
2311       set_map(old_map);
2312       set_sp(old_sp);
2313       return false; // off-heap oop accesses are not supported
2314     }
2315   } else {
2316     heap_base_oop = base; // on-heap or mixed access
2317   }
2318 
2319   // Can base be NULL? Otherwise, always on-heap access.
2320   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2321 
2322   if (!can_access_non_heap) {
2323     decorators |= IN_HEAP;
2324   }
2325 
2326   Node* val = is_store ? argument(4) : NULL;
2327 
2328   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2329   if (adr_type == TypePtr::NULL_PTR) {
2330     set_map(old_map);
2331     set_sp(old_sp);
2332     return false; // off-heap access with zero address
2333   }
2334 
2335   // Try to categorize the address.
2336   Compile::AliasType* alias_type = C->alias_type(adr_type);
2337   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2338 
2339   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2340       alias_type->adr_type() == TypeAryPtr::RANGE) {
2341     set_map(old_map);
2342     set_sp(old_sp);
2343     return false; // not supported
2344   }
2345 
2346   bool mismatched = false;
2347   BasicType bt = alias_type->basic_type();
2348   if (bt != T_ILLEGAL) {
2349     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2350     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2351       // Alias type doesn't differentiate between byte[] and boolean[].
2352       // Use address type to get the element type.
2353       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2354     }
2355     if (bt == T_ARRAY || bt == T_NARROWOOP) {
2356       // accessing an array field with getReference is not a mismatch
2357       bt = T_OBJECT;
2358     }
2359     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2360       // Don't intrinsify mismatched object accesses
2361       set_map(old_map);
2362       set_sp(old_sp);
2363       return false;
2364     }
2365     mismatched = (bt != type);
2366   } else if (alias_type->adr_type()->isa_oopptr()) {
2367     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2368   }
2369 
2370   old_map->destruct(&_gvn);
2371   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2372 
2373   if (mismatched) {
2374     decorators |= C2_MISMATCHED;
2375   }
2376 
2377   // First guess at the value type.
2378   const Type *value_type = Type::get_const_basic_type(type);
2379 
2380   // Figure out the memory ordering.
2381   decorators |= mo_decorator_for_access_kind(kind);
2382 
2383   if (!is_store && type == T_OBJECT) {
2384     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2385     if (tjp != NULL) {
2386       value_type = tjp;
2387     }
2388   }
2389 
2390   receiver = null_check(receiver);
2391   if (stopped()) {
2392     return true;
2393   }
2394   // Heap pointers get a null-check from the interpreter,
2395   // as a courtesy.  However, this is not guaranteed by Unsafe,
2396   // and it is not possible to fully distinguish unintended nulls
2397   // from intended ones in this API.
2398 
2399   if (!is_store) {
2400     Node* p = NULL;
2401     // Try to constant fold a load from a constant field
2402     ciField* field = alias_type->field();
2403     if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2404       // final or stable field
2405       p = make_constant_from_field(field, heap_base_oop);
2406     }
2407 
2408     if (p == NULL) { // Could not constant fold the load
2409       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2410       // Normalize the value returned by getBoolean in the following cases
2411       if (type == T_BOOLEAN &&
2412           (mismatched ||
2413            heap_base_oop == top() ||                  // - heap_base_oop is NULL or
2414            (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL
2415                                                       //   and the unsafe access is made to large offset
2416                                                       //   (i.e., larger than the maximum offset necessary for any
2417                                                       //   field access)
2418             ) {
2419           IdealKit ideal = IdealKit(this);
2420 #define __ ideal.
2421           IdealVariable normalized_result(ideal);
2422           __ declarations_done();
2423           __ set(normalized_result, p);
2424           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2425           __ set(normalized_result, ideal.ConI(1));
2426           ideal.end_if();
2427           final_sync(ideal);
2428           p = __ value(normalized_result);
2429 #undef __
2430       }
2431     }
2432     if (type == T_ADDRESS) {
2433       p = gvn().transform(new CastP2XNode(NULL, p));
2434       p = ConvX2UL(p);
2435     }
2436     // The load node has the control of the preceding MemBarCPUOrder.  All
2437     // following nodes will have the control of the MemBarCPUOrder inserted at
2438     // the end of this method.  So, pushing the load onto the stack at a later
2439     // point is fine.
2440     set_result(p);
2441   } else {
2442     if (bt == T_ADDRESS) {
2443       // Repackage the long as a pointer.
2444       val = ConvL2X(val);
2445       val = gvn().transform(new CastX2PNode(val));
2446     }
2447     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2448   }
2449 
2450   return true;
2451 }
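
The getBoolean normalization generated with IdealKit above amounts to canonicalizing any non-zero byte to 1. A standalone sketch of that semantics, assuming a hypothetical helper name:

#include <cstdint>

// Mirrors the IdealKit sequence: result = p; if (p != 0) result = 1;
static int32_t normalize_unsafe_boolean(int8_t raw) {
  return (raw != 0) ? 1 : 0;
}
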
2452 
2453 //----------------------------inline_unsafe_load_store----------------------------
2454 // This method serves a couple of different customers (depending on LoadStoreKind):
2455 //
2456 // LS_cmp_swap:
2457 //
2458 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2459 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2460 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2461 //
2462 // LS_cmp_swap_weak:
2463 //
2464 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2465 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2466 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2467 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2468 //
2469 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);

2638     }
2639     case LS_cmp_swap:
2640     case LS_cmp_swap_weak:
2641     case LS_get_add:
2642       break;
2643     default:
2644       ShouldNotReachHere();
2645   }
2646 
2647   // Null check receiver.
2648   receiver = null_check(receiver);
2649   if (stopped()) {
2650     return true;
2651   }
2652 
2653   int alias_idx = C->get_alias_index(adr_type);
2654 
2655   if (is_reference_type(type)) {
2656     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2657 
2658     // Transformation of a value which could be NULL pointer (CastPP #NULL)
2659     // could be delayed during Parse (for example, in adjust_map_after_if()).
2660     // Execute transformation here to avoid barrier generation in such case.
2661     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2662       newval = _gvn.makecon(TypePtr::NULL_PTR);
2663 
2664     if (oldval != NULL && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2665       // Refine the value to a null constant, when it is known to be null
2666       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2667     }
2668   }
2669 
2670   Node* result = NULL;
2671   switch (kind) {
2672     case LS_cmp_exchange: {
2673       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2674                                             oldval, newval, value_type, type, decorators);
2675       break;
2676     }
2677     case LS_cmp_swap_weak:

2799   Node* cls = null_check(argument(1));
2800   if (stopped())  return true;
2801 
2802   Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
2803   kls = null_check(kls);
2804   if (stopped())  return true;  // argument was like int.class
2805 
2806   Node* test = NULL;
2807   if (LibraryCallKit::klass_needs_init_guard(kls)) {
2808     // Note:  The argument might still be an illegal value like
2809     // Serializable.class or Object[].class.   The runtime will handle it.
2810     // But we must make an explicit check for initialization.
2811     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2812     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2813     // can generate code to load it as unsigned byte.
2814     Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
2815     Node* bits = intcon(InstanceKlass::fully_initialized);
2816     test = _gvn.transform(new SubINode(inst, bits));
2817     // The 'test' is non-zero if we need to take a slow path.
2818   }
2819 
2820   Node* obj = new_instance(kls, test);
2821   set_result(obj);
2822   return true;
2823 }
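
The initialization guard above loads InstanceKlass::_init_state as an unsigned byte and takes the slow path when it differs from fully_initialized. A minimal model of that test, with the values passed in since the actual constants live in instanceKlass.hpp:

#include <cstdint>

// 'test' in the code above is (init_state - fully_initialized); non-zero means slow path.
static bool needs_init_slow_path(uint8_t init_state, uint8_t fully_initialized) {
  return init_state != fully_initialized;
}
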
2824 
2825 //------------------------inline_native_time_funcs--------------
2826 // inline code for System.currentTimeMillis() and System.nanoTime()
2827 // these have the same type and signature
2828 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2829   const TypeFunc* tf = OptoRuntime::void_long_Type();
2830   const TypePtr* no_memory_effects = NULL;
2831   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2832   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2833 #ifdef ASSERT
2834   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2835   assert(value_top == top(), "second value must be top");
2836 #endif
2837   set_result(value);
2838   return true;
2839 }
2840 

2948   set_control(jobj_is_not_null);
2949   Node* res = access_load(jobj, TypeInstPtr::NOTNULL, T_OBJECT,
2950                           IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
2951   result_rgn->init_req(_normal_path, control());
2952   result_val->init_req(_normal_path, res);
2953 
2954   set_result(result_rgn, result_val);
2955 
2956   return true;
2957 }
2958 
2959 #endif // JFR_HAVE_INTRINSICS
2960 
2961 //------------------------inline_native_currentThread------------------
2962 bool LibraryCallKit::inline_native_currentThread() {
2963   Node* junk = NULL;
2964   set_result(generate_current_thread(junk));
2965   return true;
2966 }
2967 
2968 //---------------------------load_mirror_from_klass----------------------------
2969 // Given a klass oop, load its java mirror (a java.lang.Class oop).
2970 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
2971   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
2972   Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
2973   // mirror = ((OopHandle)mirror)->resolve();
2974   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
2975 }
2976 
2977 //-----------------------load_klass_from_mirror_common-------------------------
2978 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
2979 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
2980 // and branch to the given path on the region.
2981 // If never_see_null, take an uncommon trap on null, so we can optimistically
2982 // compile for the non-null case.
2983 // If the region is NULL, force never_see_null = true.
2984 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
2985                                                     bool never_see_null,
2986                                                     RegionNode* region,
2987                                                     int null_path,
2988                                                     int offset) {
2989   if (region == NULL)  never_see_null = true;
2990   Node* p = basic_plus_adr(mirror, offset);
2991   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
2992   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
2993   Node* null_ctl = top();
2994   kls = null_check_oop(kls, &null_ctl, never_see_null);
2995   if (region != NULL) {
2996     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

2999     assert(null_ctl == top(), "no loose ends");
3000   }
3001   return kls;
3002 }
3003 
3004 //--------------------(inline_native_Class_query helpers)---------------------
3005 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3006 // Fall through if (mods & mask) == bits, take the guard otherwise.
3007 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3008   // Branch around if the given klass has the given modifier bit set.
3009   // Like generate_guard, adds a new path onto the region.
3010   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3011   Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
3012   Node* mask = intcon(modifier_mask);
3013   Node* bits = intcon(modifier_bits);
3014   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3015   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3016   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3017   return generate_fair_guard(bol, region);
3018 }

3019 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3020   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3021 }
3022 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3023   return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3024 }
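
A compact model of the access-flags guards above (hypothetical helper name). The guard path is taken when the masked modifier bits differ from the expected bits, so generate_interface_guard (mask = JVM_ACC_INTERFACE, bits = 0) branches exactly when the klass is an interface.

// Returns true when the guard (branch-around) path would be taken.
static bool access_flags_guard_taken(int access_flags, int modifier_mask, int modifier_bits) {
  return (access_flags & modifier_mask) != modifier_bits;
}
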
3025 
3026 //-------------------------inline_native_Class_query-------------------
3027 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3028   const Type* return_type = TypeInt::BOOL;
3029   Node* prim_return_value = top();  // what happens if it's a primitive class?
3030   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3031   bool expect_prim = false;     // most of these guys expect to work on refs
3032 
3033   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3034 
3035   Node* mirror = argument(0);
3036   Node* obj    = top();
3037 
3038   switch (id) {

3192 
3193   case vmIntrinsics::_getClassAccessFlags:
3194     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3195     query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3196     break;
3197 
3198   default:
3199     fatal_unexpected_iid(id);
3200     break;
3201   }
3202 
3203   // Fall-through is the normal case of a query to a real class.
3204   phi->init_req(1, query_value);
3205   region->init_req(1, control());
3206 
3207   C->set_has_split_ifs(true); // Has chance for split-if optimization
3208   set_result(region, phi);
3209   return true;
3210 }
3211 
3212 //-------------------------inline_Class_cast-------------------
3213 bool LibraryCallKit::inline_Class_cast() {
3214   Node* mirror = argument(0); // Class
3215   Node* obj    = argument(1);
3216   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3217   if (mirror_con == NULL) {
3218     return false;  // dead path (mirror->is_top()).
3219   }
3220   if (obj == NULL || obj->is_top()) {
3221     return false;  // dead path
3222   }
3223   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();

3224 
3225   // First, see if Class.cast() can be folded statically.
3226   // java_mirror_type() returns non-null for compile-time Class constants.
3227   ciType* tm = mirror_con->java_mirror_type();
3228   if (tm != NULL && tm->is_klass() &&
3229       tp != NULL && tp->klass() != NULL) {
3230     if (!tp->klass()->is_loaded()) {

3231       // Don't use intrinsic when class is not loaded.
3232       return false;
3233     } else {
3234       int static_res = C->static_subtype_check(tm->as_klass(), tp->klass());
3235       if (static_res == Compile::SSC_always_true) {
3236         // isInstance() is true - fold the code.
3237         set_result(obj);
3238         return true;
3239       } else if (static_res == Compile::SSC_always_false) {
3240         // Don't use intrinsic, have to throw ClassCastException.
3241         // If the reference is null, the non-intrinsic bytecode will
3242         // be optimized appropriately.
3243         return false;
3244       }
3245     }
3246   }
3247 
3248   // Bailout intrinsic and do normal inlining if exception path is frequent.
3249   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3250     return false;
3251   }
3252 
3253   // Generate dynamic checks.
3254   // Class.cast() is the Java implementation of the _checkcast bytecode.
3255   // Do checkcast (Parse::do_checkcast()) optimizations here.
3256 
3257   mirror = null_check(mirror);
3258   // If mirror is dead, only null-path is taken.
3259   if (stopped()) {
3260     return true;
3261   }
3262 
3263   // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
3264   enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
3265   RegionNode* region = new RegionNode(PATH_LIMIT);
3266   record_for_igvn(region);
3267 
3268   // Now load the mirror's klass metaobject, and null-check it.
3269   // If kls is null, we have a primitive mirror and
3270   // nothing is an instance of a primitive type.
3271   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
3272 
3273   Node* res = top();
3274   if (!stopped()) {
3275     Node* bad_type_ctrl = top();
3276     // Do checkcast optimizations.
3277     res = gen_checkcast(obj, kls, &bad_type_ctrl);
3278     region->init_req(_bad_type_path, bad_type_ctrl);
3279   }
3280   if (region->in(_prim_path) != top() ||
3281       region->in(_bad_type_path) != top()) {

3282     // Let Interpreter throw ClassCastException.
3283     PreserveJVMState pjvms(this);
3284     set_control(_gvn.transform(region));
3285     uncommon_trap(Deoptimization::Reason_intrinsic,
3286                   Deoptimization::Action_maybe_recompile);
3287   }
3288   if (!stopped()) {
3289     set_result(res);
3290   }
3291   return true;
3292 }
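
A sketch of the folding decision at the top of inline_Class_cast(), using hypothetical enums in place of Compile::SSC_* so no HotSpot constants are assumed: a statically-true check folds the cast to the object itself, a statically-false check (or an unloaded klass) falls back to the checkcast bytecode, and everything else takes the dynamic path.

enum class StaticCheck { AlwaysTrue, AlwaysFalse, Unknown };
enum class CastPlan    { FoldAway, UseBytecode, DynamicCheck };

static CastPlan plan_class_cast(bool obj_klass_loaded, StaticCheck res) {
  if (!obj_klass_loaded)               return CastPlan::UseBytecode; // don't intrinsify
  if (res == StaticCheck::AlwaysTrue)  return CastPlan::FoldAway;    // cast always succeeds
  if (res == StaticCheck::AlwaysFalse) return CastPlan::UseBytecode; // let the bytecode throw CCE
  return CastPlan::DynamicCheck;
}
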
3293 
3294 
3295 //--------------------------inline_native_subtype_check------------------------
3296 // This intrinsic takes the JNI calls out of the heart of
3297 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3298 bool LibraryCallKit::inline_native_subtype_check() {
3299   // Pull both arguments off the stack.
3300   Node* args[2];                // two java.lang.Class mirrors: superc, subc
3301   args[0] = argument(0);
3302   args[1] = argument(1);
3303   Node* klasses[2];             // corresponding Klasses: superk, subk
3304   klasses[0] = klasses[1] = top();
3305 
3306   enum {
3307     // A full decision tree on {superc is prim, subc is prim}:
3308     _prim_0_path = 1,           // {P,N} => false
3309                                 // {P,P} & superc!=subc => false
3310     _prim_same_path,            // {P,P} & superc==subc => true
3311     _prim_1_path,               // {N,P} => false
3312     _ref_subtype_path,          // {N,N} & subtype check wins => true
3313     _both_ref_path,             // {N,N} & subtype check loses => false
3314     PATH_LIMIT
3315   };
3316 
3317   RegionNode* region = new RegionNode(PATH_LIMIT);

3318   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
3319   record_for_igvn(region);

3320 
3321   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
3322   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3323   int class_klass_offset = java_lang_Class::klass_offset();
3324 
3325   // First null-check both mirrors and load each mirror's klass metaobject.
3326   int which_arg;
3327   for (which_arg = 0; which_arg <= 1; which_arg++) {
3328     Node* arg = args[which_arg];
3329     arg = null_check(arg);
3330     if (stopped())  break;
3331     args[which_arg] = arg;
3332 
3333     Node* p = basic_plus_adr(arg, class_klass_offset);
3334     Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
3335     klasses[which_arg] = _gvn.transform(kls);
3336   }
3337 
3338   // Having loaded both klasses, test each for null.
3339   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3340   for (which_arg = 0; which_arg <= 1; which_arg++) {
3341     Node* kls = klasses[which_arg];
3342     Node* null_ctl = top();
3343     kls = null_check_oop(kls, &null_ctl, never_see_null);
3344     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
3345     region->init_req(prim_path, null_ctl);
3346     if (stopped())  break;
3347     klasses[which_arg] = kls;
3348   }
3349 
3350   if (!stopped()) {
3351     // now we have two reference types, in klasses[0..1]
3352     Node* subk   = klasses[1];  // the argument to isAssignableFrom
3353     Node* superk = klasses[0];  // the receiver
3354     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3355     // now we have a successful reference subtype check
3356     region->set_req(_ref_subtype_path, control());
3357   }
3358 
3359   // If both operands are primitive (both klasses null), then
3360   // we must return true when they are identical primitives.
3361   // It is convenient to test this after the first null klass check.
3362   set_control(region->in(_prim_0_path)); // go back to first null check

3363   if (!stopped()) {
3364     // Since superc is primitive, make a guard for the superc==subc case.
3365     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
3366     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
3367     generate_guard(bol_eq, region, PROB_FAIR);
3368     if (region->req() == PATH_LIMIT+1) {
3369       // A guard was added.  If the added guard is taken, superc==subc.
3370       region->swap_edges(PATH_LIMIT, _prim_same_path);
3371       region->del_req(PATH_LIMIT);
3372     }
3373     region->set_req(_prim_0_path, control()); // Not equal after all.
3374   }
3375 
3376   // these are the only paths that produce 'true':
3377   phi->set_req(_prim_same_path,   intcon(1));
3378   phi->set_req(_ref_subtype_path, intcon(1));
3379 
3380   // pull together the cases:
3381   assert(region->req() == PATH_LIMIT, "sane region");
3382   for (uint i = 1; i < region->req(); i++) {
3383     Node* ctl = region->in(i);
3384     if (ctl == NULL || ctl == top()) {
3385       region->set_req(i, top());
3386       phi   ->set_req(i, top());
3387     } else if (phi->in(i) == NULL) {
3388       phi->set_req(i, intcon(0)); // all other paths produce 'false'
3389     }
3390   }
3391 
3392   set_control(_gvn.transform(region));
3393   set_result(_gvn.transform(phi));
3394   return true;
3395 }
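
The decision tree enumerated at the top of inline_native_subtype_check() can be summarized by this standalone sketch; the function name and boolean inputs are hypothetical stand-ins for the mirror and klass tests the intrinsic performs.

// {P,N} and {N,P} => false; {P,P} => true only for the identical primitive mirror;
// {N,N} => whatever the reference subtype check decides.
static bool model_is_assignable_from(bool superc_is_prim, bool subc_is_prim,
                                     bool same_mirror, bool subtype_check_wins) {
  if (superc_is_prim || subc_is_prim) {
    return superc_is_prim && subc_is_prim && same_mirror;
  }
  return subtype_check_wins;
}
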
3396 
3397 //---------------------generate_array_guard_common------------------------
3398 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
3399                                                   bool obj_array, bool not_array) {
3400 
3401   if (stopped()) {
3402     return NULL;
3403   }
3404 
3405   // If obj_array/not_array==false/false:
3406   // Branch around if the given klass is in fact an array (either obj or prim).
3407   // If obj_array/not_array==false/true:
3408   // Branch around if the given klass is not an array klass of any kind.
3409   // If obj_array/not_array==true/true:
3410   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
3411   // If obj_array/not_array==true/false:
3412   // Branch around if the kls is an oop array (Object[] or subtype)
3413   //
3414   // Like generate_guard, adds a new path onto the region.
3415   jint  layout_con = 0;
3416   Node* layout_val = get_layout_helper(kls, layout_con);
3417   if (layout_val == NULL) {
3418     bool query = (obj_array
3419                   ? Klass::layout_helper_is_objArray(layout_con)
3420                   : Klass::layout_helper_is_array(layout_con));
3421     if (query == not_array) {
3422       return NULL;                       // never a branch
3423     } else {                             // always a branch
3424       Node* always_branch = control();
3425       if (region != NULL)
3426         region->add_req(always_branch);
3427       set_control(top());
3428       return always_branch;
3429     }
3430   }
3431   // Now test the correct condition.
3432   jint  nval = (obj_array
3433                 ? (jint)(Klass::_lh_array_tag_type_value
3434                    <<    Klass::_lh_array_tag_shift)
3435                 : Klass::_lh_neutral_value);
3436   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
3437   BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
3438   // invert the test if we are looking for a non-array
3439   if (not_array)  btest = BoolTest(btest).negate();
3440   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
3441   return generate_fair_guard(bol, region);
3442 }
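
A hedged sketch of the layout-helper test above: array layout helpers are negative with the array tag in the top bits, so a signed "less than" against the right threshold distinguishes arrays and object arrays. The 0xC0000000 threshold below restates the type-array tag encoding assumed from klass.hpp; it is not defined in this file.

#include <cstdint>

static bool lh_is_array(int32_t lh)     { return lh < 0; }                      // _lh_neutral_value
static bool lh_is_obj_array(int32_t lh) { return lh < (int32_t)0xC0000000u; }   // type-array tag threshold
// Passing not_array simply negates whichever test was chosen, matching the
// BoolTest negation in the code above.
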
3443 
3444 
3445 //-----------------------inline_native_newArray--------------------------
3446 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
3447 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
3448 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
3449   Node* mirror;
3450   Node* count_val;
3451   if (uninitialized) {
3452     mirror    = argument(1);
3453     count_val = argument(2);
3454   } else {
3455     mirror    = argument(0);
3456     count_val = argument(1);
3457   }
3458 
3459   mirror = null_check(mirror);
3460   // If mirror or obj is dead, only null-path is taken.
3461   if (stopped())  return true;
3462 
3463   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3464   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3465   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
3466   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);

3571   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3572   { PreserveReexecuteState preexecs(this);
3573     jvms()->set_should_reexecute(true);
3574 
3575     array_type_mirror = null_check(array_type_mirror);
3576     original          = null_check(original);
3577 
3578     // Check if a null path was taken unconditionally.
3579     if (stopped())  return true;
3580 
3581     Node* orig_length = load_array_length(original);
3582 
3583     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3584     klass_node = null_check(klass_node);
3585 
3586     RegionNode* bailout = new RegionNode(1);
3587     record_for_igvn(bailout);
3588 
3589     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3590     // Bail out if that is so.
3591     Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
3592     if (not_objArray != NULL) {
3593       // Improve the klass node's type from the new optimistic assumption:
3594       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3595       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
3596       Node* cast = new CastPPNode(klass_node, akls);
3597       cast->init_req(0, control());
3598       klass_node = _gvn.transform(cast);
3599     }
3600 
3601     // Bail out if either start or end is negative.
3602     generate_negative_guard(start, bailout, &start);
3603     generate_negative_guard(end,   bailout, &end);
3604 
3605     Node* length = end;
3606     if (_gvn.type(start) != TypeInt::ZERO) {
3607       length = _gvn.transform(new SubINode(end, start));
3608     }
3609 
3610     // Bail out if length is negative.
3611     // Without this the new_array would throw a
3612     // NegativeArraySizeException, but an IllegalArgumentException is what
3613     // should be thrown.
3614     generate_negative_guard(length, bailout, &length);
3615 
3616     if (bailout->req() > 1) {
3617       PreserveJVMState pjvms(this);
3618       set_control(_gvn.transform(bailout));
3619       uncommon_trap(Deoptimization::Reason_intrinsic,
3620                     Deoptimization::Action_maybe_recompile);
3621     }
3622 
3623     if (!stopped()) {
3624       // How many elements will we copy from the original?
3625       // The answer is MinI(orig_length - start, length).
3626       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
3627       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
3628 
3629       // Generate a direct call to the right arraycopy function(s).
3630       // We know the copy is disjoint but we might not know if the
3631       // oop stores need checking.
3632       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
3633       // This will fail a store-check if x contains any non-nulls.
3634 
3635       // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
3636       // loads/stores but it is legal only if we're sure the
3637       // Arrays.copyOf would succeed. So we need all input arguments
3638       // to the copyOf to be validated, including that the copy to the
3639       // new array won't trigger an ArrayStoreException. That subtype
3640       // check can be optimized if we know something on the type of
3641       // the input array from type speculation.
3642       if (_gvn.type(klass_node)->singleton()) {
3643         ciKlass* subk   = _gvn.type(load_object_klass(original))->is_klassptr()->klass();
3644         ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
3645 
3646         int test = C->static_subtype_check(superk, subk);
3647         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
3648           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
3649           if (t_original->speculative_type() != NULL) {
3650             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
3651           }
3652         }
3653       }
3654 
3655       bool validated = false;
3656       // Reason_class_check rather than Reason_intrinsic because we
3657       // want to intrinsify even if this traps.
3658       if (!too_many_traps(Deoptimization::Reason_class_check)) {
3659         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
3660 
3661         if (not_subtype_ctrl != top()) {
3662           PreserveJVMState pjvms(this);
3663           set_control(not_subtype_ctrl);
3664           uncommon_trap(Deoptimization::Reason_class_check,
3665                         Deoptimization::Action_make_not_entrant);
3666           assert(stopped(), "Should be stopped");
3667         }
3668         validated = true;
3669       }
3670 
3671       if (!stopped()) {
3672         newcopy = new_array(klass_node, length, 0);  // no arguments to push
3673 
3674         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
3675                                                 load_object_klass(original), klass_node);
3676         if (!is_copyOfRange) {
3677           ac->set_copyof(validated);
3678         } else {
3679           ac->set_copyofrange(validated);
3680         }
3681         Node* n = _gvn.transform(ac);
3682         if (n == ac) {
3683           ac->connect_outputs(this);
3684         } else {
3685           assert(validated, "shouldn't transform if all arguments not validated");
3686           set_all_memory(n);
3687         }
3688       }
3689     }
3690   } // original reexecute is set back here
3691 
3692   C->set_has_split_ifs(true); // Has chance for split-if optimization
3693   if (!stopped()) {
3694     set_result(newcopy);
3695   }
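
The bounds handling earlier in this method (the negative guards plus MinI(orig_length - start, length)) can be summarized by this standalone sketch with hypothetical helper names:

// Any negative start, end, or (end - start) routes the copy to the uncommon trap /
// interpreter path, so the bytecode can throw the correct exception type.
static bool copy_of_range_bails_out(int start, int end) {
  return start < 0 || end < 0 || (end - start) < 0;
}

// Elements actually copied from the original: MinI(orig_length - start, length).
static int elements_to_copy(int orig_length, int start, int length) {
  int orig_tail = orig_length - start;
  return orig_tail < length ? orig_tail : length;
}
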

3777   set_edges_for_java_call(slow_call);
3778   return slow_call;
3779 }
3780 
3781 
3782 /**
3783  * Build special case code for calls to hashCode on an object. This call may
3784  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
3785  * slightly different code.
3786  */
3787 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
3788   assert(is_static == callee()->is_static(), "correct intrinsic selection");
3789   assert(!(is_virtual && is_static), "either virtual, special, or static");
3790 
3791   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
3792 
3793   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3794   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
3795   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
3796   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
3797   Node* obj = NULL;
3798   if (!is_static) {
3799     // Check for hashing null object
3800     obj = null_check_receiver();
3801     if (stopped())  return true;        // unconditionally null
3802     result_reg->init_req(_null_path, top());
3803     result_val->init_req(_null_path, top());
3804   } else {
3805     // Do a null check, and return zero if null.
3806     // System.identityHashCode(null) == 0
3807     obj = argument(0);
3808     Node* null_ctl = top();
3809     obj = null_check_oop(obj, &null_ctl);
3810     result_reg->init_req(_null_path, null_ctl);
3811     result_val->init_req(_null_path, _gvn.intcon(0));
3812   }
3813 
3814   // Unconditionally null?  Then return right away.
3815   if (stopped()) {
3816     set_control( result_reg->in(_null_path));
3817     if (!stopped())
3818       set_result(result_val->in(_null_path));
3819     return true;
3820   }
3821 
3822   // We only go to the fast case code if we pass a number of guards.  The
3823   // paths which do not pass are accumulated in the slow_region.
3824   RegionNode* slow_region = new RegionNode(1);
3825   record_for_igvn(slow_region);
3826 
3827   // If this is a virtual call, we generate a funny guard.  We pull out
3828   // the vtable entry corresponding to hashCode() from the target object.
3829   // If the target method which we are calling happens to be the native
3830   // Object hashCode() method, we pass the guard.  We do not need this
3831   // guard for non-virtual calls -- the caller is known to be the native
3832   // Object hashCode().
3833   if (is_virtual) {
3834     // After null check, get the object's klass.
3835     Node* obj_klass = load_object_klass(obj);
3836     generate_virtual_guard(obj_klass, slow_region);
3837   }
3838 
3839   // Get the header out of the object, use LoadMarkNode when available
3840   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3841   // The control of the load must be NULL. Otherwise, the load can move before
3842   // the null check after castPP removal.
3843   Node* no_ctrl = NULL;
3844   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
3845 
3846   // Test the header to see if it is unlocked.
3847   Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);

3848   Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
3849   Node *unlocked_val   = _gvn.MakeConX(markWord::unlocked_value);
3850   Node *chk_unlocked   = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
3851   Node *test_unlocked  = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
3852 
3853   generate_slow_guard(test_unlocked, slow_region);
3854 
3855   // Get the hash value and check to see that it has been properly assigned.
3856   // We depend on hash_mask being at most 32 bits and avoid the use of
3857   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
3858   // vm: see markWord.hpp.
3859   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
3860   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
3861   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
3862   // This hack lets the hash bits live anywhere in the mark object now, as long
3863   // as the shift drops the relevant bits into the low 32 bits.  Note that
3864   // the Java spec says that hashCode is an int, so there's no point in capturing
3865   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
3866   hshifted_header      = ConvX2I(hshifted_header);
3867   Node *hash_val       = _gvn.transform(new AndINode(hshifted_header, hash_mask));

3893     // this->control() comes from set_results_for_java_call
3894     result_reg->init_req(_slow_path, control());
3895     result_val->init_req(_slow_path, slow_result);
3896     result_io  ->set_req(_slow_path, i_o());
3897     result_mem ->set_req(_slow_path, reset_memory());
3898   }
3899 
3900   // Return the combined state.
3901   set_i_o(        _gvn.transform(result_io)  );
3902   set_all_memory( _gvn.transform(result_mem));
3903 
3904   set_result(result_reg, result_val);
3905   return true;
3906 }
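
The fast path above first checks that the mark word is unlocked and then extracts the identity hash by shift-and-mask. A standalone sketch of those two tests, with the mask and shift values passed in because the real constants come from markWord.hpp:

#include <cstdint>

// The slow path is taken when the low lock bits do not show the "unlocked" pattern.
static bool mark_is_unlocked(uintptr_t mark, uintptr_t lock_mask, uintptr_t unlocked_value) {
  return (mark & lock_mask) == unlocked_value;
}

// The identity hash lives in the mark word; shifting right and masking to at most
// 32 bits yields the int-sized hash that java.lang.Object.hashCode() returns.
static uint32_t mark_hash(uintptr_t mark, int hash_shift, uint32_t hash_mask) {
  return (uint32_t)(mark >> hash_shift) & hash_mask;
}
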
3907 
3908 //---------------------------inline_native_getClass----------------------------
3909 // public final native Class<?> java.lang.Object.getClass();
3910 //
3911 // Build special case code for calls to getClass on an object.
3912 bool LibraryCallKit::inline_native_getClass() {
3913   Node* obj = null_check_receiver();
3914   if (stopped())  return true;
3915   set_result(load_mirror_from_klass(load_object_klass(obj)));
3916   return true;
3917 }
3918 
3919 //-----------------inline_native_Reflection_getCallerClass---------------------
3920 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
3921 //
3922 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
3923 //
3924 // NOTE: This code must perform the same logic as JVM_GetCallerClass
3925 // in that it must skip particular security frames and checks for
3926 // caller sensitive methods.
3927 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
3928 #ifndef PRODUCT
3929   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
3930     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
3931   }
3932 #endif
3933 

4231 //  not cloneable or finalizer => slow path to out-of-line Object.clone
4232 //
4233 // The general case has two steps, allocation and copying.
4234 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4235 //
4236 // Copying also has two cases, oop arrays and everything else.
4237 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4238 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4239 //
4240 // These steps fold up nicely if and when the cloned object's klass
4241 // can be sharply typed as an object array, a type array, or an instance.
4242 //
4243 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4244   PhiNode* result_val;
4245 
4246   // Set the reexecute bit for the interpreter to reexecute
4247   // the bytecode that invokes Object.clone if deoptimization happens.
4248   { PreserveReexecuteState preexecs(this);
4249     jvms()->set_should_reexecute(true);
4250 
4251     Node* obj = null_check_receiver();
4252     if (stopped())  return true;
4253 
4254     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4255 
4256     // If we are going to clone an instance, we need its exact type to
4257     // know the number and types of fields to convert the clone to
4258     // loads/stores. Maybe a speculative type can help us.
4259     if (!obj_type->klass_is_exact() &&
4260         obj_type->speculative_type() != NULL &&
4261         obj_type->speculative_type()->is_instance_klass()) {

4262       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
4263       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
4264           !spec_ik->has_injected_fields()) {
4265         ciKlass* k = obj_type->klass();
4266         if (!k->is_instance_klass() ||
4267             k->as_instance_klass()->is_interface() ||
4268             k->as_instance_klass()->has_subklass()) {
4269           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
4270         }
4271       }
4272     }
4273 
4274     // Conservatively insert a memory barrier on all memory slices.
4275     // Do not let writes into the original float below the clone.
4276     insert_mem_bar(Op_MemBarCPUOrder);
4277 
4278     // paths into result_reg:
4279     enum {
4280       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
4281       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
4282       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
4283       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
4284       PATH_LIMIT
4285     };
4286     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4287     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4288     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
4289     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4290     record_for_igvn(result_reg);
4291 
4292     Node* obj_klass = load_object_klass(obj);





4293     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4294     if (array_ctl != NULL) {
4295       // It's an array.
4296       PreserveJVMState pjvms(this);
4297       set_control(array_ctl);
4298       Node* obj_length = load_array_length(obj);
4299       Node* obj_size  = NULL;
4300       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true);
4301 
4302       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4303       if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
4304         // If it is an oop array, it requires very special treatment,
4305         // because gc barriers are required when accessing the array.
4306         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4307         if (is_obja != NULL) {
4308           PreserveJVMState pjvms2(this);
4309           set_control(is_obja);
4310           // Generate a direct call to the right arraycopy function(s).
4311           // Clones are always tightly coupled.
4312           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
4313           ac->set_clone_oop_array();
4314           Node* n = _gvn.transform(ac);
4315           assert(n == ac, "cannot disappear");
4316           ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
4317 
4318           result_reg->init_req(_objArray_path, control());
4319           result_val->init_req(_objArray_path, alloc_obj);
4320           result_i_o ->set_req(_objArray_path, i_o());
4321           result_mem ->set_req(_objArray_path, reset_memory());
4322         }
4323       }
4324       // Otherwise, there are no barriers to worry about.
4325       // (We can dispense with card marks if we know the allocation
4326       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4327       //  causes the non-eden paths to take compensating steps to
4328       //  simulate a fresh allocation, so that no further
4329       //  card marks are required in compiled code to initialize
4330       //  the object.)
4331 
4332       if (!stopped()) {
4333         copy_to_clone(obj, alloc_obj, obj_size, true);
4334 
4335         // Present the results of the copy.
4336         result_reg->init_req(_array_path, control());
4337         result_val->init_req(_array_path, alloc_obj);
4338         result_i_o ->set_req(_array_path, i_o());
4339         result_mem ->set_req(_array_path, reset_memory());




































4340       }
4341     }
4342 
4343     // We only go to the instance fast case code if we pass a number of guards.
4344     // The paths which do not pass are accumulated in the slow_region.
4345     RegionNode* slow_region = new RegionNode(1);
4346     record_for_igvn(slow_region);
4347     if (!stopped()) {
4348       // It's an instance (we did array above).  Make the slow-path tests.
4349       // If this is a virtual call, we generate a funny guard.  We grab
4350       // the vtable entry corresponding to clone() from the target object.
4351       // If the target method which we are calling happens to be the
4352       // Object clone() method, we pass the guard.  We do not need this
4353       // guard for non-virtual calls; the caller is known to be the native
4354       // Object clone().
4355       if (is_virtual) {
4356         generate_virtual_guard(obj_klass, slow_region);
4357       }
4358 
4359       // The object must be easily cloneable and must not have a finalizer.
4360       // Both of these conditions may be checked in a single test.
4361       // We could optimize the test further, but we don't care.
4362       generate_access_flags_guard(obj_klass,
4363                                   // Test both conditions:
4364                                   JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
4365                                   // Must be cloneable but not finalizer:
4366                                   JVM_ACC_IS_CLONEABLE_FAST,

4487 // array in the heap that GCs wouldn't expect. Move the allocation
4488 // after the traps so we don't allocate the array if we
4489 // deoptimize. This is possible because tightly_coupled_allocation()
4490 // guarantees there's no observer of the allocated array at this point
4491 // and the control flow is simple enough.
4492 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms,
4493                                                     int saved_reexecute_sp, uint new_idx) {
4494   if (saved_jvms != NULL && !stopped()) {
4495     assert(alloc != NULL, "only with a tightly coupled allocation");
4496     // restore JVM state to the state at the arraycopy
4497     saved_jvms->map()->set_control(map()->control());
4498     assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
4499     assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
4500     // If we've improved the types of some nodes (null check) while
4501     // emitting the guards, propagate them to the current state
4502     map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
4503     set_jvms(saved_jvms);
4504     _reexecute_sp = saved_reexecute_sp;
4505 
4506     // Remove the allocation from above the guards
4507     CallProjections callprojs;
4508     alloc->extract_projections(&callprojs, true);
4509     InitializeNode* init = alloc->initialization();
4510     Node* alloc_mem = alloc->in(TypeFunc::Memory);
4511     C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
4512     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
4513 
4514     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
4515     // the allocation (i.e. is only valid if the allocation succeeds):
4516     // 1) replace CastIINode with AllocateArrayNode's length here
4517     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
4518     //
4519     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
4520     // a new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy)
4521     Node* init_control = init->proj_out(TypeFunc::Control);
4522     Node* alloc_length = alloc->Ideal_length();
4523 #ifdef ASSERT
4524     Node* prev_cast = NULL;
4525 #endif
4526     for (uint i = 0; i < init_control->outcnt(); i++) {
4527       Node* init_out = init_control->raw_out(i);
4528       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
4529 #ifdef ASSERT
4530         if (prev_cast == NULL) {
4531           prev_cast = init_out;

4533           if (prev_cast->cmp(*init_out) == false) {
4534             prev_cast->dump();
4535             init_out->dump();
4536             assert(false, "not equal CastIINode");
4537           }
4538         }
4539 #endif
4540         C->gvn_replace_by(init_out, alloc_length);
4541       }
4542     }
4543     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
4544 
4545     // move the allocation here (after the guards)
4546     _gvn.hash_delete(alloc);
4547     alloc->set_req(TypeFunc::Control, control());
4548     alloc->set_req(TypeFunc::I_O, i_o());
4549     Node *mem = reset_memory();
4550     set_all_memory(mem);
4551     alloc->set_req(TypeFunc::Memory, mem);
4552     set_control(init->proj_out_or_null(TypeFunc::Control));
4553     set_i_o(callprojs.fallthrough_ioproj);
4554 
4555     // Update memory as done in GraphKit::set_output_for_allocation()
4556     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
4557     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
4558     if (ary_type->isa_aryptr() && length_type != NULL) {
4559       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4560     }
4561     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
4562     int            elemidx  = C->get_alias_index(telemref);
4563     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
4564     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
4565 
4566     Node* allocx = _gvn.transform(alloc);
4567     assert(allocx == alloc, "where has the allocation gone?");
4568     assert(dest->is_CheckCastPP(), "not an allocation result?");
4569 
4570     _gvn.hash_delete(dest);
4571     dest->set_req(0, control());
4572     Node* destx = _gvn.transform(dest);
4573     assert(destx == dest, "where has the allocation result gone?");

4709       // Do we have the exact type of dest?
4710       bool could_have_dest = dest_spec;
4711       ciKlass* src_k = top_src->klass();
4712       ciKlass* dest_k = top_dest->klass();
4713       if (!src_spec) {
4714         src_k = src_type->speculative_type_not_null();
4715         if (src_k != NULL && src_k->is_array_klass()) {
4716           could_have_src = true;
4717         }
4718       }
4719       if (!dest_spec) {
4720         dest_k = dest_type->speculative_type_not_null();
4721         if (dest_k != NULL && dest_k->is_array_klass()) {
4722           could_have_dest = true;
4723         }
4724       }
4725       if (could_have_src && could_have_dest) {
4726         // If we can have both exact types, emit the missing guards
4727         if (could_have_src && !src_spec) {
4728           src = maybe_cast_profiled_obj(src, src_k, true);


4729         }
4730         if (could_have_dest && !dest_spec) {
4731           dest = maybe_cast_profiled_obj(dest, dest_k, true);


4732         }
4733       }
4734     }
4735   }
4736 
4737   ciMethod* trap_method = method();
4738   int trap_bci = bci();
4739   if (saved_jvms != NULL) {
4740     trap_method = alloc->jvms()->method();
4741     trap_bci = alloc->jvms()->bci();
4742   }
4743 
4744   bool negative_length_guard_generated = false;
4745 
4746   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
4747       can_emit_guards &&
4748       !src->is_top() && !dest->is_top()) {
4749     // validate arguments: enables transformation of the ArrayCopyNode
4750     validated = true;
4751 
4752     RegionNode* slow_region = new RegionNode(1);
4753     record_for_igvn(slow_region);
4754 
4755     // (1) src and dest are arrays.
4756     generate_non_array_guard(load_object_klass(src), slow_region);
4757     generate_non_array_guard(load_object_klass(dest), slow_region);
4758 
4759     // (2) src and dest arrays must have elements of the same BasicType
4760     // done at macro expansion or at Ideal transformation time
4761 
4762     // (4) src_offset must not be negative.
4763     generate_negative_guard(src_offset, slow_region);
4764 
4765     // (5) dest_offset must not be negative.
4766     generate_negative_guard(dest_offset, slow_region);
4767 
4768     // (7) src_offset + length must not exceed length of src.

4771                          slow_region);
4772 
4773     // (8) dest_offset + length must not exceed length of dest.
4774     generate_limit_guard(dest_offset, length,
4775                          load_array_length(dest),
4776                          slow_region);
4777 
4778     // (6) length must not be negative.
4779     // This is also checked in generate_arraycopy() during macro expansion, but
4780     // we also have to check it here for the case where the ArrayCopyNode will
4781     // be eliminated by Escape Analysis.
4782     if (EliminateAllocations) {
4783       generate_negative_guard(length, slow_region);
4784       negative_length_guard_generated = true;
4785     }
4786 
4787     // (9) each element of an oop array must be assignable
4788     Node* dest_klass = load_object_klass(dest);
4789     if (src != dest) {
4790       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);


4791 
4792       if (not_subtype_ctrl != top()) {
4793         PreserveJVMState pjvms(this);
4794         set_control(not_subtype_ctrl);
4795         uncommon_trap(Deoptimization::Reason_intrinsic,
4796                       Deoptimization::Action_make_not_entrant);
4797         assert(stopped(), "Should be stopped");






















4798       }
4799     }

4800     {
4801       PreserveJVMState pjvms(this);
4802       set_control(_gvn.transform(slow_region));
4803       uncommon_trap(Deoptimization::Reason_intrinsic,
4804                     Deoptimization::Action_make_not_entrant);
4805       assert(stopped(), "Should be stopped");
4806     }
4807 
4808     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
4809     const Type *toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
4810     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
4811   }
4812 
4813   arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
4814 
4815   if (stopped()) {
4816     return true;
4817   }
4818 
4819   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL, negative_length_guard_generated,
4820                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
4821                                           // so the compiler has a chance to eliminate them: during macro expansion,
4822                                           // we have to set their control (CastPP nodes are eliminated).
4823                                           load_object_klass(src), load_object_klass(dest),
4824                                           load_array_length(src), load_array_length(dest));
4825 
4826   ac->set_arraycopy(validated);
4827 
4828   Node* n = _gvn.transform(ac);
4829   if (n == ac) {
4830     ac->connect_outputs(this);

  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "ci/ciFlatArrayKlass.hpp"
  28 #include "ci/ciUtilities.inline.hpp"
  29 #include "classfile/vmIntrinsics.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compileLog.hpp"
  32 #include "gc/shared/barrierSet.hpp"
  33 #include "jfr/support/jfrIntrinsics.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "oops/klass.inline.hpp"
  36 #include "oops/objArrayKlass.hpp"
  37 #include "opto/addnode.hpp"
  38 #include "opto/arraycopynode.hpp"
  39 #include "opto/c2compiler.hpp"
  40 #include "opto/castnode.hpp"
  41 #include "opto/cfgnode.hpp"
  42 #include "opto/convertnode.hpp"
  43 #include "opto/countbitsnode.hpp"
  44 #include "opto/idealKit.hpp"
  45 #include "opto/library_call.hpp"
  46 #include "opto/mathexactnode.hpp"
  47 #include "opto/mulnode.hpp"

 303   case vmIntrinsics::_indexOfUL:                return inline_string_indexOf(StrIntrinsicNode::UL);
 304   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 305   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 306   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 307   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 308   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 309 
 310   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 311   case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);
 312 
 313   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 314   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 315   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 316   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 317 
 318   case vmIntrinsics::_compressStringC:
 319   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 320   case vmIntrinsics::_inflateStringC:
 321   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 322 
 323   case vmIntrinsics::_makePrivateBuffer:        return inline_unsafe_make_private_buffer();
 324   case vmIntrinsics::_finishPrivateBuffer:      return inline_unsafe_finish_private_buffer();
 325   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 326   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 327   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 328   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 329   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 330   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 331   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 332   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 333   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);
 334   case vmIntrinsics::_getValue:                 return inline_unsafe_access(!is_store, T_PRIMITIVE_OBJECT,Relaxed, false);
 335 
 336   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 337   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 338   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 339   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 340   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 341   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 342   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 343   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 344   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);
 345   case vmIntrinsics::_putValue:                 return inline_unsafe_access( is_store, T_PRIMITIVE_OBJECT,Relaxed, false);
 346 
 347   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 348   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 349   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 350   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 351   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 352   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 353   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 354   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 355   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 356 
 357   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 358   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 359   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 360   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 361   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 362   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 363   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 364   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 365   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 495   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 496   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 497   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 498   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 499   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 500 
 501   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 502   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 503 
 504   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 505 
 506   case vmIntrinsics::_isInstance:
 507   case vmIntrinsics::_getModifiers:
 508   case vmIntrinsics::_isInterface:
 509   case vmIntrinsics::_isArray:
 510   case vmIntrinsics::_isPrimitive:
 511   case vmIntrinsics::_isHidden:
 512   case vmIntrinsics::_getSuperclass:
 513   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 514 
 515   case vmIntrinsics::_asPrimaryType:
 516   case vmIntrinsics::_asValueType:              return inline_primitive_Class_conversion(intrinsic_id());
 517 
 518   case vmIntrinsics::_floatToRawIntBits:
 519   case vmIntrinsics::_floatToIntBits:
 520   case vmIntrinsics::_intBitsToFloat:
 521   case vmIntrinsics::_doubleToRawLongBits:
 522   case vmIntrinsics::_doubleToLongBits:
 523   case vmIntrinsics::_longBitsToDouble:         return inline_fp_conversions(intrinsic_id());
 524 
 525   case vmIntrinsics::_numberOfLeadingZeros_i:
 526   case vmIntrinsics::_numberOfLeadingZeros_l:
 527   case vmIntrinsics::_numberOfTrailingZeros_i:
 528   case vmIntrinsics::_numberOfTrailingZeros_l:
 529   case vmIntrinsics::_bitCount_i:
 530   case vmIntrinsics::_bitCount_l:
 531   case vmIntrinsics::_reverseBytes_i:
 532   case vmIntrinsics::_reverseBytes_l:
 533   case vmIntrinsics::_reverseBytes_s:
 534   case vmIntrinsics::_reverseBytes_c:           return inline_number_methods(intrinsic_id());
 535 
 536   case vmIntrinsics::_getCallerClass:           return inline_native_Reflection_getCallerClass();
 537 

2176   case vmIntrinsics::_numberOfLeadingZeros_l:   n = new CountLeadingZerosLNode( arg);  break;
2177   case vmIntrinsics::_numberOfTrailingZeros_i:  n = new CountTrailingZerosINode(arg);  break;
2178   case vmIntrinsics::_numberOfTrailingZeros_l:  n = new CountTrailingZerosLNode(arg);  break;
2179   case vmIntrinsics::_bitCount_i:               n = new PopCountINode(          arg);  break;
2180   case vmIntrinsics::_bitCount_l:               n = new PopCountLNode(          arg);  break;
2181   case vmIntrinsics::_reverseBytes_c:           n = new ReverseBytesUSNode(0,   arg);  break;
2182   case vmIntrinsics::_reverseBytes_s:           n = new ReverseBytesSNode( 0,   arg);  break;
2183   case vmIntrinsics::_reverseBytes_i:           n = new ReverseBytesINode( 0,   arg);  break;
2184   case vmIntrinsics::_reverseBytes_l:           n = new ReverseBytesLNode( 0,   arg);  break;
2185   default:  fatal_unexpected_iid(id);  break;
2186   }
2187   set_result(_gvn.transform(n));
2188   return true;
2189 }
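     // (Each case above maps the Java method onto a single ideal node; e.g.
     // Integer.bitCount(x) becomes one PopCountI node, which typically matches
     // a hardware population-count instruction when the platform provides one.)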
2190 
2191 //----------------------------inline_unsafe_access----------------------------
2192 
2193 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2194   // Attempt to infer a sharper value type from the offset and base type.
2195   ciKlass* sharpened_klass = NULL;
2196   bool null_free = false;
2197 
2198   // See if it is an instance field, with an object type.
2199   if (alias_type->field() != NULL) {
2200     if (alias_type->field()->type()->is_klass()) {
2201       sharpened_klass = alias_type->field()->type()->as_klass();
2202       null_free = alias_type->field()->is_null_free();
2203     }
2204   }
2205 
2206   // See if it is a narrow oop array.
2207   if (adr_type->isa_aryptr()) {
2208     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2209       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2210       null_free = adr_type->is_aryptr()->is_null_free();
2211       if (elem_type != NULL) {
2212         sharpened_klass = elem_type->klass();
2213       }
2214     }
2215   }
2216 
2217   // The sharpened class might be unloaded if there is no class loader
2218   // constraint in place.
2219   if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2220     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2221     if (null_free) {
2222       tjp = tjp->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2223     }
2224 
2225 #ifndef PRODUCT
2226     if (C->print_intrinsics() || C->print_inlining()) {
2227       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2228       tty->print("  sharpened value: ");  tjp->dump();      tty->cr();
2229     }
2230 #endif
2231     // Sharpen the value type.
2232     return tjp;
2233   }
2234   return NULL;
2235 }
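     // Example of the sharpening above: a getReference() whose (base, offset)
     // resolves to an instance field declared as java.lang.String lets the
     // generic T_OBJECT value type be narrowed to the String instance type;
     // an element load from a String[] is narrowed the same way.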
2236 
2237 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2238   switch (kind) {
2239       case Relaxed:
2240         return MO_UNORDERED;
2241       case Opaque:
2242         return MO_RELAXED;
2243       case Acquire:

2259   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2260   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2261 
2262   if (is_reference_type(type)) {
2263     decorators |= ON_UNKNOWN_OOP_REF;
2264   }
2265 
2266   if (unaligned) {
2267     decorators |= C2_UNALIGNED;
2268   }
2269 
2270 #ifndef PRODUCT
2271   {
2272     ResourceMark rm;
2273     // Check the signatures.
2274     ciSignature* sig = callee()->signature();
2275 #ifdef ASSERT
2276     if (!is_store) {
2277       // Object getReference(Object base, int/long offset), etc.
2278       BasicType rtype = sig->return_type()->basic_type();
2279       assert(rtype == type || (rtype == T_OBJECT && type == T_PRIMITIVE_OBJECT), "getter must return the expected value");
2280       assert(sig->count() == 2 || (type == T_PRIMITIVE_OBJECT && sig->count() == 3), "oop getter has 2 or 3 arguments");
2281       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2282       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2283     } else {
2284       // void putReference(Object base, int/long offset, Object x), etc.
2285       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2286       assert(sig->count() == 3 || (type == T_PRIMITIVE_OBJECT && sig->count() == 4), "oop putter has 3 arguments");
2287       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2288       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2289       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2290       assert(vtype == type || (type == T_PRIMITIVE_OBJECT && vtype == T_OBJECT), "putter must accept the expected value");
2291     }
2292 #endif // ASSERT
2293  }
2294 #endif //PRODUCT
2295 
2296   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2297 
2298   Node* receiver = argument(0);  // type: oop
2299 
2300   // Build address expression.
2301   Node* heap_base_oop = top();
2302 
2303   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2304   Node* base = argument(1);  // type: oop
2305   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2306   Node* offset = argument(2);  // type: long
2307   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2308   // to be plain byte offsets, which are also the same as those accepted
2309   // by oopDesc::field_addr.
2310   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2311          "fieldOffset must be byte-scaled");
2312 
2313   ciInlineKlass* inline_klass = NULL;
2314   if (type == T_PRIMITIVE_OBJECT) {
2315     const TypeInstPtr* cls = _gvn.type(argument(4))->isa_instptr();
2316     if (cls == NULL || cls->const_oop() == NULL) {
2317       return false;
2318     }
2319     ciType* mirror_type = cls->const_oop()->as_instance()->java_mirror_type();
2320     if (!mirror_type->is_inlinetype()) {
2321       return false;
2322     }
2323     inline_klass = mirror_type->as_inline_klass();
2324   }
2325 
2326   if (base->is_InlineTypeBase()) {
2327     InlineTypeBaseNode* vt = base->as_InlineTypeBase();
2328     if (is_store) {
2329       if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->isa_inlinetype() || !_gvn.type(vt)->is_inlinetype()->larval()) {
2330         return false;
2331       }
2332       base = vt->get_oop();
2333     } else {
2334       if (offset->is_Con()) {
2335         long off = find_long_con(offset, 0);
2336         ciInlineKlass* vk = vt->type()->inline_klass();
2337         if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2338           return false;
2339         }
2340 
2341         ciField* field = vk->get_non_flattened_field_by_offset(off);
2342         if (field != NULL) {
2343           BasicType bt = field->layout_type();
2344           if (bt == T_ARRAY || bt == T_NARROWOOP || (bt == T_PRIMITIVE_OBJECT && !field->is_flattened())) {
2345             bt = T_OBJECT;
2346           }
2347           if (bt == type && (bt != T_PRIMITIVE_OBJECT || field->type() == inline_klass)) {
2348             set_result(vt->field_value_by_offset(off, false));
2349             return true;
2350           }
2351         }
2352       }
2353       if (vt->is_InlineType()) {
2354         // Re-execute the unsafe access if allocation triggers deoptimization.
2355         PreserveReexecuteState preexecs(this);
2356         jvms()->set_should_reexecute(true);
2357         vt = vt->buffer(this);
2358       }
2359       base = vt->get_oop();
2360     }
2361   }
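       // At this point 'base' is an ordinary oop again: for larval stores it is
       // the already-allocated buffer, and for loads either the field value was
       // returned directly above or the value object has been buffered.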
2362 
2363   // 32-bit machines ignore the high half!
2364   offset = ConvL2X(offset);
2365 
2366   // Save state and restore on bailout
2367   uint old_sp = sp();
2368   SafePointNode* old_map = clone_map();
2369 
2370   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2371 
2372   if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2373     if (type != T_OBJECT && (inline_klass == NULL || !inline_klass->has_object_fields())) {
2374       decorators |= IN_NATIVE; // off-heap primitive access
2375     } else {
2376       set_map(old_map);
2377       set_sp(old_sp);
2378       return false; // off-heap oop accesses are not supported
2379     }
2380   } else {
2381     heap_base_oop = base; // on-heap or mixed access
2382   }
2383 
2384   // Can base be NULL? If it cannot, this is always an on-heap access.
2385   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2386 
2387   if (!can_access_non_heap) {
2388     decorators |= IN_HEAP;
2389   }
2390 
2391   Node* val = is_store ? argument(4 + (type == T_PRIMITIVE_OBJECT ? 1 : 0)) : NULL;
2392 
2393   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2394   if (adr_type == TypePtr::NULL_PTR) {
2395     set_map(old_map);
2396     set_sp(old_sp);
2397     return false; // off-heap access with zero address
2398   }
2399 
2400   // Try to categorize the address.
2401   Compile::AliasType* alias_type = C->alias_type(adr_type);
2402   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2403 
2404   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2405       alias_type->adr_type() == TypeAryPtr::RANGE) {
2406     set_map(old_map);
2407     set_sp(old_sp);
2408     return false; // not supported
2409   }
2410 
2411   bool mismatched = false;
2412   BasicType bt = T_ILLEGAL;
2413   ciField* field = NULL;
2414   if (adr_type->isa_instptr()) {
2415     const TypeInstPtr* instptr = adr_type->is_instptr();
2416     ciInstanceKlass* k = instptr->klass()->as_instance_klass();
2417     int off = instptr->offset();
2418     if (instptr->const_oop() != NULL &&
2419         instptr->klass() == ciEnv::current()->Class_klass() &&
2420         instptr->offset() >= (instptr->klass()->as_instance_klass()->size_helper() * wordSize)) {
2421       k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2422       field = k->get_field_by_offset(off, true);
2423     } else {
2424       field = k->get_non_flattened_field_by_offset(off);
2425     }
2426     if (field != NULL) {
2427       bt = field->layout_type();
2428     }
2429     assert(bt == alias_type->basic_type() || bt == T_PRIMITIVE_OBJECT, "should match");
2430     if (field != NULL && bt == T_PRIMITIVE_OBJECT && !field->is_flattened()) {
2431       bt = T_OBJECT;
2432     }
2433   } else {
2434     bt = alias_type->basic_type();
2435   }
2436 
2437   if (bt != T_ILLEGAL) {
2438     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2439     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2440       // Alias type doesn't differentiate between byte[] and boolean[].
2441       // Use address type to get the element type.
2442       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2443     }
2444     if (bt == T_ARRAY || bt == T_NARROWOOP) {
2445       // accessing an array field with getReference is not a mismatch
2446       bt = T_OBJECT;
2447     }
2448     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2449       // Don't intrinsify mismatched object accesses
2450       set_map(old_map);
2451       set_sp(old_sp);
2452       return false;
2453     }
2454     mismatched = (bt != type);
2455   } else if (alias_type->adr_type()->isa_oopptr()) {
2456     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2457   }
2458 
2459   if (type == T_PRIMITIVE_OBJECT) {
2460     if (adr_type->isa_instptr()) {
2461       if (field == NULL || field->type() != inline_klass) {
2462         mismatched = true;
2463       }
2464     } else if (adr_type->isa_aryptr()) {
2465       const Type* elem = adr_type->is_aryptr()->elem();
2466       if (!elem->isa_inlinetype()) {
2467         mismatched = true;
2468       } else if (elem->inline_klass() != inline_klass) {
2469         mismatched = true;
2470       }
2471     } else {
2472       mismatched = true;
2473     }
2474     if (is_store) {
2475       const Type* val_t = _gvn.type(val);
2476       if (!(val_t->isa_inlinetype() || val_t->is_inlinetypeptr()) || val_t->inline_klass() != inline_klass) {
2477         set_map(old_map);
2478         set_sp(old_sp);
2479         return false;
2480       }
2481     }
2482   }
2483 
2484   old_map->destruct(&_gvn);
2485   assert(!mismatched || type == T_PRIMITIVE_OBJECT || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2486 
2487   if (mismatched) {
2488     decorators |= C2_MISMATCHED;
2489   }
2490 
2491   // First guess at the value type.
2492   const Type *value_type = Type::get_const_basic_type(type);
2493 
2494   // Figure out the memory ordering.
2495   decorators |= mo_decorator_for_access_kind(kind);
2496 
2497   if (!is_store) {
2498     if (type == T_OBJECT) {
2499       const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2500       if (tjp != NULL) {
2501         value_type = tjp;
2502       }
2503     } else if (type == T_PRIMITIVE_OBJECT) {
2504       value_type = NULL;
2505     }
2506   }
2507 
2508   receiver = null_check(receiver);
2509   if (stopped()) {
2510     return true;
2511   }
2512   // Heap pointers get a null-check from the interpreter,
2513   // as a courtesy.  However, this is not guaranteed by Unsafe,
2514   // and it is not possible to fully distinguish unintended nulls
2515   // from intended ones in this API.
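       // For example, unsafe.getInt(o, off) with o == null is a raw read at the
       // absolute address 'off', while the same call with a non-null 'o' reads
       // at 'o + off' on the heap; both shapes flow through the code below.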
2516 
2517   if (!is_store) {
2518     Node* p = NULL;
2519     // Try to constant fold a load from a constant field
2520 
2521     if (heap_base_oop != top() && field != NULL && field->is_constant() && !field->is_flattened() && !mismatched) {
2522       // final or stable field
2523       p = make_constant_from_field(field, heap_base_oop);
2524     }
2525 
2526     if (p == NULL) { // Could not constant fold the load
2527       if (type == T_PRIMITIVE_OBJECT) {
2528         if (adr_type->isa_instptr() && !mismatched) {
2529           ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
2530           int offset = adr_type->is_instptr()->offset();
2531           p = InlineTypeNode::make_from_flattened(this, inline_klass, base, base, holder, offset, decorators);
2532         } else {
2533           p = InlineTypeNode::make_from_flattened(this, inline_klass, base, adr, NULL, 0, decorators);
2534         }
2535       } else {
2536         p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2537         const TypeOopPtr* ptr = value_type->make_oopptr();
2538         if (ptr != NULL && ptr->is_inlinetypeptr()) {
2539           // Load a non-flattened inline type from memory
2540           p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass(), !ptr->maybe_null());
2541         }
2542       }
2543       // Normalize the value returned by getBoolean in the following cases
2544       if (type == T_BOOLEAN &&
2545           (mismatched ||
2546            heap_base_oop == top() ||                  // - heap_base_oop is NULL or
2547            (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL
2548                                                       //   and the unsafe access is made at a large offset
2549                                                       //   (i.e., larger than the maximum offset necessary for any
2550                                                       //   field access)
2551             ) {
2552           IdealKit ideal = IdealKit(this);
2553 #define __ ideal.
2554           IdealVariable normalized_result(ideal);
2555           __ declarations_done();
2556           __ set(normalized_result, p);
2557           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2558           __ set(normalized_result, ideal.ConI(1));
2559           ideal.end_if();
2560           final_sync(ideal);
2561           p = __ value(normalized_result);
2562 #undef __
2563       }
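           // The IdealKit fragment above is equivalent to  p = (p != 0) ? 1 : 0,
           // so getBoolean() never returns byte values other than 0 or 1 even
           // when the read may have hit arbitrary or mismatched memory.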
2564     }
2565     if (type == T_ADDRESS) {
2566       p = gvn().transform(new CastP2XNode(NULL, p));
2567       p = ConvX2UL(p);
2568     }
2569     // The load node has the control of the preceding MemBarCPUOrder.  All
2570     // following nodes will have the control of the MemBarCPUOrder inserted at
2571     // the end of this method.  So, pushing the load onto the stack at a later
2572     // point is fine.
2573     set_result(p);
2574   } else {
2575     if (bt == T_ADDRESS) {
2576       // Repackage the long as a pointer.
2577       val = ConvL2X(val);
2578       val = gvn().transform(new CastX2PNode(val));
2579     }
2580     if (type == T_PRIMITIVE_OBJECT) {
2581       if (adr_type->isa_instptr() && !mismatched) {
2582         ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
2583         int offset = adr_type->is_instptr()->offset();
2584         val->as_InlineTypeBase()->store_flattened(this, base, base, holder, offset, decorators);
2585       } else {
2586         val->as_InlineTypeBase()->store_flattened(this, base, adr, NULL, 0, decorators);
2587       }
2588     } else {
2589       access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2590     }
2591   }
2592 
2593   if (argument(1)->is_InlineType() && is_store) {
2594     InlineTypeBaseNode* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(base)->inline_klass());
2595     value = value->make_larval(this, false);
2596     replace_in_map(argument(1), value);
2597   }
2598 
2599   return true;
2600 }
2601 
2602 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2603   Node* receiver = argument(0);
2604   Node* value = argument(1);
2605   if (!value->is_InlineTypeBase()) {
2606     return false;
2607   }
2608 
2609   receiver = null_check(receiver);
2610   if (stopped()) {
2611     return true;
2612   }
2613 
2614   set_result(value->as_InlineTypeBase()->make_larval(this, true));
2615   return true;
2616 }
2617 
2618 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2619   Node* receiver = argument(0);
2620   Node* buffer = argument(1);
2621   if (!buffer->is_InlineType()) {
2622     return false;
2623   }
2624   InlineTypeNode* vt = buffer->as_InlineType();
2625   if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_inlinetype()->larval()) {
2626     return false;
2627   }
2628 
2629   receiver = null_check(receiver);
2630   if (stopped()) {
2631     return true;
2632   }
2633 
2634   set_result(vt->finish_larval(this));
2635   return true;
2636 }
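     // Taken together, the two intrinsics above bracket a larval update sequence,
     // roughly of this (illustrative) Java shape, where OFF_X stands for a field
     // offset previously obtained from Unsafe:
     //
     //   MyValue v2 = U.makePrivateBuffer(v1);    // buffered, mutable (larval) copy
     //   U.putInt(v2, OFF_X, 42);                 // plain unsafe puts into the buffer
     //   MyValue v3 = U.finishPrivateBuffer(v2);  // published as a normal value
     //
     // The intrinsics themselves only wrap make_larval()/finish_larval() above.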
2637 
2638 //----------------------------inline_unsafe_load_store----------------------------
2639 // This method serves a couple of different customers (depending on LoadStoreKind):
2640 //
2641 // LS_cmp_swap:
2642 //
2643 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2644 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2645 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2646 //
2647 // LS_cmp_swap_weak:
2648 //
2649 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2650 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2651 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2652 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2653 //
2654 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);

2823     }
2824     case LS_cmp_swap:
2825     case LS_cmp_swap_weak:
2826     case LS_get_add:
2827       break;
2828     default:
2829       ShouldNotReachHere();
2830   }
2831 
2832   // Null check receiver.
2833   receiver = null_check(receiver);
2834   if (stopped()) {
2835     return true;
2836   }
2837 
2838   int alias_idx = C->get_alias_index(adr_type);
2839 
2840   if (is_reference_type(type)) {
2841     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2842 
2843     if (oldval != NULL && oldval->is_InlineType()) {
2844       // Re-execute the unsafe access if allocation triggers deoptimization.
2845       PreserveReexecuteState preexecs(this);
2846       jvms()->set_should_reexecute(true);
2847       oldval = oldval->as_InlineType()->buffer(this)->get_oop();
2848     }
2849     if (newval != NULL && newval->is_InlineType()) {
2850       // Re-execute the unsafe access if allocation triggers deoptimization.
2851       PreserveReexecuteState preexecs(this);
2852       jvms()->set_should_reexecute(true);
2853       newval = newval->as_InlineType()->buffer(this)->get_oop();
2854     }
2855 
2856     // Transformation of a value which could be a NULL pointer (CastPP #NULL)
2857     // could be delayed during Parse (for example, in adjust_map_after_if()).
2858     // Execute the transformation here to avoid barrier generation in such a case.
2859     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2860       newval = _gvn.makecon(TypePtr::NULL_PTR);
2861 
2862     if (oldval != NULL && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2863       // Refine the value to a null constant, when it is known to be null
2864       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2865     }
2866   }
2867 
2868   Node* result = NULL;
2869   switch (kind) {
2870     case LS_cmp_exchange: {
2871       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2872                                             oldval, newval, value_type, type, decorators);
2873       break;
2874     }
2875     case LS_cmp_swap_weak:

2997   Node* cls = null_check(argument(1));
2998   if (stopped())  return true;
2999 
3000   Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
3001   kls = null_check(kls);
3002   if (stopped())  return true;  // argument was like int.class
3003 
3004   Node* test = NULL;
3005   if (LibraryCallKit::klass_needs_init_guard(kls)) {
3006     // Note:  The argument might still be an illegal value like
3007     // Serializable.class or Object[].class.   The runtime will handle it.
3008     // But we must make an explicit check for initialization.
3009     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3010     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3011     // can generate code to load it as an unsigned byte.
3012     Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
3013     Node* bits = intcon(InstanceKlass::fully_initialized);
3014     test = _gvn.transform(new SubINode(inst, bits));
3015     // The 'test' is non-zero if we need to take a slow path.
3016   }
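       // When the klass is not an inline type, 'test' is handed to new_instance()
       // below as an extra slow-path test: a not-yet-initialized class takes the
       // slow path, and the runtime ensures <clinit> has run before allocating.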
3017   Node* obj = NULL;
3018   ciKlass* klass = _gvn.type(kls)->is_klassptr()->klass();
3019   if (klass->is_inlinetype()) {
3020     obj = InlineTypeNode::make_default(_gvn, klass->as_inline_klass())->buffer(this);
3021   } else {
3022     obj = new_instance(kls, test);
3023   }
3024   set_result(obj);
3025   return true;
3026 }
3027 
3028 //------------------------inline_native_time_funcs--------------
3029 // inline code for System.currentTimeMillis() and System.nanoTime()
3030 // these have the same type and signature
3031 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3032   const TypeFunc* tf = OptoRuntime::void_long_Type();
3033   const TypePtr* no_memory_effects = NULL;
3034   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3035   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3036 #ifdef ASSERT
3037   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3038   assert(value_top == top(), "second value must be top");
3039 #endif
3040   set_result(value);
3041   return true;
3042 }
3043 

3151   set_control(jobj_is_not_null);
3152   Node* res = access_load(jobj, TypeInstPtr::NOTNULL, T_OBJECT,
3153                           IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
3154   result_rgn->init_req(_normal_path, control());
3155   result_val->init_req(_normal_path, res);
3156 
3157   set_result(result_rgn, result_val);
3158 
3159   return true;
3160 }
3161 
3162 #endif // JFR_HAVE_INTRINSICS
3163 
3164 //------------------------inline_native_currentThread------------------
3165 bool LibraryCallKit::inline_native_currentThread() {
3166   Node* junk = NULL;
3167   set_result(generate_current_thread(junk));
3168   return true;
3169 }
3170 









3171 //-----------------------load_klass_from_mirror_common-------------------------
3172 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3173 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3174 // and branch to the given path on the region.
3175 // If never_see_null, take an uncommon trap on null, so we can optimistically
3176 // compile for the non-null case.
3177 // If the region is NULL, force never_see_null = true.
3178 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3179                                                     bool never_see_null,
3180                                                     RegionNode* region,
3181                                                     int null_path,
3182                                                     int offset) {
3183   if (region == NULL)  never_see_null = true;
3184   Node* p = basic_plus_adr(mirror, offset);
3185   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3186   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3187   Node* null_ctl = top();
3188   kls = null_check_oop(kls, &null_ctl, never_see_null);
3189   if (region != NULL) {
3190     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

3193     assert(null_ctl == top(), "no loose ends");
3194   }
3195   return kls;
3196 }
3197 
3198 //--------------------(inline_native_Class_query helpers)---------------------
3199 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3200 // Fall through if (mods & mask) == bits, take the guard otherwise.
3201 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3202   // Branch around if the given klass has the given modifier bit set.
3203   // Like generate_guard, adds a new path onto the region.
3204   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3205   Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
3206   Node* mask = intcon(modifier_mask);
3207   Node* bits = intcon(modifier_bits);
3208   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3209   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3210   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3211   return generate_fair_guard(bol, region);
3212 }
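     // For instance, generate_interface_guard() below passes mask = JVM_ACC_INTERFACE
     // and bits = 0: control falls through when the klass is not an interface and
     // branches onto 'region' when it is.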
3213 
3214 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3215   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3216 }
3217 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3218   return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3219 }
3220 
3221 //-------------------------inline_native_Class_query-------------------
3222 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3223   const Type* return_type = TypeInt::BOOL;
3224   Node* prim_return_value = top();  // what happens if it's a primitive class?
3225   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3226   bool expect_prim = false;     // most of these guys expect to work on refs
3227 
3228   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3229 
3230   Node* mirror = argument(0);
3231   Node* obj    = top();
3232 
3233   switch (id) {

3387 
3388   case vmIntrinsics::_getClassAccessFlags:
3389     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3390     query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3391     break;
3392 
3393   default:
3394     fatal_unexpected_iid(id);
3395     break;
3396   }
3397 
3398   // Fall-through is the normal case of a query to a real class.
3399   phi->init_req(1, query_value);
3400   region->init_req(1, control());
3401 
3402   C->set_has_split_ifs(true); // Has chance for split-if optimization
3403   set_result(region, phi);
3404   return true;
3405 }
3406 
3407 //-------------------------inline_primitive_Class_conversion-------------------
3408 // public Class<T> java.lang.Class.asPrimaryType();
3409 // public Class<T> java.lang.Class.asValueType();
3410 bool LibraryCallKit::inline_primitive_Class_conversion(vmIntrinsics::ID id) {
3411   Node* mirror = argument(0); // Receiver Class
3412   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3413   if (mirror_con == NULL) {
3414     return false;
3415   }
3416 
3417   bool is_val_mirror = true;
3418   ciType* tm = mirror_con->java_mirror_type(&is_val_mirror);
3419   if (tm != NULL) {
3420     Node* result = mirror;
3421     if (id == vmIntrinsics::_asPrimaryType && is_val_mirror) {
3422       result = _gvn.makecon(TypeInstPtr::make(tm->as_inline_klass()->ref_mirror()));
3423     } else if (id == vmIntrinsics::_asValueType) {
3424       if (!tm->is_inlinetype()) {
3425         return false; // Throw UnsupportedOperationException
3426       } else if (!is_val_mirror) {
3427         result = _gvn.makecon(TypeInstPtr::make(tm->as_inline_klass()->val_mirror()));
3428       }
3429     }
3430     set_result(result);
3431     return true;
3432   }
3433   return false;
3434 }
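     // In other words, for a constant receiver mirror: asPrimaryType() rewrites a
     // val (Q) mirror to the matching ref mirror and is a no-op otherwise, while
     // asValueType() rewrites a ref mirror to the val mirror and refuses to
     // intrinsify when the receiver is not an inline class, so the Java code can
     // throw UnsupportedOperationException.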
3435 
3436 //-------------------------inline_Class_cast-------------------
3437 bool LibraryCallKit::inline_Class_cast() {
3438   Node* mirror = argument(0); // Class
3439   Node* obj    = argument(1);
3440   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3441   if (mirror_con == NULL) {
3442     return false;  // dead path (mirror->is_top()).
3443   }
3444   if (obj == NULL || obj->is_top()) {
3445     return false;  // dead path
3446   }
3447   ciKlass* obj_klass = NULL;
3448   const Type* obj_t = _gvn.type(obj);
3449   if (obj->is_InlineType()) {
3450     obj_klass = obj_t->inline_klass();
3451   } else if (obj_t->isa_oopptr()) {
3452     obj_klass = obj_t->is_oopptr()->klass();
3453   }
3454 
3455   // First, see if Class.cast() can be folded statically.
3456   // java_mirror_type() returns non-null for compile-time Class constants.
3457   bool requires_null_check = false;
3458   ciType* tm = mirror_con->java_mirror_type(&requires_null_check);
3459   // Check for null if casting to QMyValue
3460   if (tm != NULL && tm->is_klass() && obj_klass != NULL) {
3461     if (!obj_klass->is_loaded()) {
3462       // Don't use intrinsic when class is not loaded.
3463       return false;
3464     } else {
3465       int static_res = C->static_subtype_check(tm->as_klass(), obj_klass);
3466       if (static_res == Compile::SSC_always_true) {
3467         // isInstance() is true - fold the code.
3468         if (requires_null_check) {
3469           obj = null_check(obj);
3470         }
3471         set_result(obj);
3472         return true;
3473       } else if (static_res == Compile::SSC_always_false) {
3474         // Don't use intrinsic, have to throw ClassCastException.
3475         // If the reference is null, the non-intrinsic bytecode will
3476         // be optimized appropriately.
3477         return false;
3478       }
3479     }
3480   }
3481 
3482   // Bail out of the intrinsic and do normal inlining if the exception path is frequent.
3483   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3484     return false;
3485   }
3486 
3487   // Generate dynamic checks.
3488   // Class.cast() is java implementation of _checkcast bytecode.
3489   // Do checkcast (Parse::do_checkcast()) optimizations here.
3490 
3491   if (requires_null_check) {
3492     obj = null_check(obj);
3493   }
3494   mirror = null_check(mirror);
3495   // If mirror is dead, only null-path is taken.
3496   if (stopped()) {
3497     return true;
3498   }
3499 
3500   // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
3501   enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
3502   RegionNode* region = new RegionNode(PATH_LIMIT);
3503   record_for_igvn(region);
3504 
3505   // Now load the mirror's klass metaobject, and null-check it.
3506   // If kls is null, we have a primitive mirror and
3507   // nothing is an instance of a primitive type.
3508   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
3509 
3510   Node* res = top();
3511   if (!stopped()) {
3512     if (EnableValhalla && !requires_null_check) {
3513       // Check if we are casting to QMyValue
3514       Node* ctrl_val_mirror = generate_fair_guard(is_val_mirror(mirror), NULL);
3515       if (ctrl_val_mirror != NULL) {
3516         RegionNode* r = new RegionNode(3);
3517         record_for_igvn(r);
3518         r->init_req(1, control());
3519 
3520         // Casting to QMyValue, check for null
3521         set_control(ctrl_val_mirror);
3522         { // PreserveJVMState because null check replaces obj in map
3523           PreserveJVMState pjvms(this);
3524           Node* null_ctr = top();
3525           null_check_oop(obj, &null_ctr);
3526           region->init_req(_npe_path, null_ctr);
3527           r->init_req(2, control());
3528         }
3529         set_control(_gvn.transform(r));
3530       }
3531     }
3532 
3533     Node* bad_type_ctrl = top();
3534     // Do checkcast optimizations.
3535     res = gen_checkcast(obj, kls, &bad_type_ctrl);
3536     region->init_req(_bad_type_path, bad_type_ctrl);
3537   }
3538   if (region->in(_prim_path) != top() ||
3539       region->in(_bad_type_path) != top() ||
3540       region->in(_npe_path) != top()) {
3541     // Let Interpreter throw ClassCastException.
3542     PreserveJVMState pjvms(this);
3543     set_control(_gvn.transform(region));
3544     uncommon_trap(Deoptimization::Reason_intrinsic,
3545                   Deoptimization::Action_maybe_recompile);
3546   }
3547   if (!stopped()) {
3548     set_result(res);
3549   }
3550   return true;
3551 }
3552 
3553 
3554 //--------------------------inline_native_subtype_check------------------------
3555 // This intrinsic takes the JNI calls out of the heart of
3556 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
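// This is also the intrinsic behind Class.isAssignableFrom: args[0] is the receiver mirror
// (superc) and args[1] is the argument (subc), as in (illustrative)
// Number.class.isAssignableFrom(Integer.class).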
3557 bool LibraryCallKit::inline_native_subtype_check() {
3558   // Pull both arguments off the stack.
3559   Node* args[2];                // two java.lang.Class mirrors: superc, subc
3560   args[0] = argument(0);
3561   args[1] = argument(1);
3562   Node* klasses[2];             // corresponding Klasses: superk, subk
3563   klasses[0] = klasses[1] = top();
3564 
3565   enum {
3566     // A full decision tree on {superc is prim, subc is prim}:
3567     _prim_0_path = 1,           // {P,N} => false
3568                                 // {P,P} & superc!=subc => false
3569     _prim_same_path,            // {P,P} & superc==subc => true
3570     _prim_1_path,               // {N,P} => false
3571     _ref_subtype_path,          // {N,N} & subtype check wins => true
3572     _both_ref_path,             // {N,N} & subtype check loses => false
3573     PATH_LIMIT
3574   };
3575 
3576   RegionNode* region = new RegionNode(PATH_LIMIT);
3577   RegionNode* prim_region = new RegionNode(2);
3578   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
3579   record_for_igvn(region);
3580   record_for_igvn(prim_region);
3581 
3582   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
3583   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3584   int class_klass_offset = java_lang_Class::klass_offset();
3585 
3586   // First null-check both mirrors and load each mirror's klass metaobject.
3587   int which_arg;
3588   for (which_arg = 0; which_arg <= 1; which_arg++) {
3589     Node* arg = args[which_arg];
3590     arg = null_check(arg);
3591     if (stopped())  break;
3592     args[which_arg] = arg;
3593 
3594     Node* p = basic_plus_adr(arg, class_klass_offset);
3595     Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
3596     klasses[which_arg] = _gvn.transform(kls);
3597   }
3598 
3599   // Having loaded both klasses, test each for null.
3600   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3601   for (which_arg = 0; which_arg <= 1; which_arg++) {
3602     Node* kls = klasses[which_arg];
3603     Node* null_ctl = top();
3604     kls = null_check_oop(kls, &null_ctl, never_see_null);
3605     if (which_arg == 0) {
3606       prim_region->init_req(1, null_ctl);
3607     } else {
3608       region->init_req(_prim_1_path, null_ctl);
3609     }
3610     if (stopped())  break;
3611     klasses[which_arg] = kls;
3612   }
3613 
3614   if (!stopped()) {
3615     // now we have two reference types, in klasses[0..1]
3616     Node* subk   = klasses[1];  // the argument to isAssignableFrom
3617     Node* superk = klasses[0];  // the receiver
3618     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3619     // If superc is an inline mirror, we also need to check whether superc == subc: LMyValue
3620     // is not a subtype of QMyValue, yet the subtype check will pass because subk == superk.
3621     generate_fair_guard(is_val_mirror(args[0]), prim_region);
3622     // now we have a successful reference subtype check
3623     region->set_req(_ref_subtype_path, control());
3624   }
3625 
3626   // If both operands are primitive (both klasses null), then
3627   // we must return true when they are identical primitives.
3628   // It is convenient to test this after the first null klass check.
3629   // This path is also used if superc is a value mirror.
3630   set_control(_gvn.transform(prim_region));
3631   if (!stopped()) {
3632     // Since superc is primitive, make a guard for the superc==subc case.
3633     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
3634     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
3635     generate_fair_guard(bol_eq, region);
3636     if (region->req() == PATH_LIMIT+1) {
3637       // A guard was added.  If the added guard is taken, superc==subc.
3638       region->swap_edges(PATH_LIMIT, _prim_same_path);
3639       region->del_req(PATH_LIMIT);
3640     }
3641     region->set_req(_prim_0_path, control()); // Not equal after all.
3642   }
3643 
3644   // these are the only paths that produce 'true':
3645   phi->set_req(_prim_same_path,   intcon(1));
3646   phi->set_req(_ref_subtype_path, intcon(1));
3647 
3648   // pull together the cases:
3649   assert(region->req() == PATH_LIMIT, "sane region");
3650   for (uint i = 1; i < region->req(); i++) {
3651     Node* ctl = region->in(i);
3652     if (ctl == NULL || ctl == top()) {
3653       region->set_req(i, top());
3654       phi   ->set_req(i, top());
3655     } else if (phi->in(i) == NULL) {
3656       phi->set_req(i, intcon(0)); // all other paths produce 'false'
3657     }
3658   }
3659 
3660   set_control(_gvn.transform(region));
3661   set_result(_gvn.transform(phi));
3662   return true;
3663 }
3664 
3665 //---------------------generate_array_guard_common------------------------
3666 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {

3667 
3668   if (stopped()) {
3669     return NULL;
3670   }
3671 









3672   // Like generate_guard, adds a new path onto the region.
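  // Roughly: a klass' layout helper is negative for arrays (with a tag in the high bits
  // distinguishing object arrays from type arrays) and non-negative otherwise, so each guard
  // below is either a tag compare after shifting, or a compare against _lh_neutral_value.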
3673   jint  layout_con = 0;
3674   Node* layout_val = get_layout_helper(kls, layout_con);
3675   if (layout_val == NULL) {
3676     bool query = false;
3677     switch(kind) {
3678       case ObjectArray:    query = Klass::layout_helper_is_objArray(layout_con); break;
3679       case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
3680       case TypeArray:      query = Klass::layout_helper_is_typeArray(layout_con); break;
3681       case AnyArray:       query = Klass::layout_helper_is_array(layout_con); break;
3682       case NonArray:       query = !Klass::layout_helper_is_array(layout_con); break;
3683       default:
3684         ShouldNotReachHere();
3685     }
3686     if (!query) {
3687       return NULL;                       // never a branch
3688     } else {                             // always a branch
3689       Node* always_branch = control();
3690       if (region != NULL)
3691         region->add_req(always_branch);
3692       set_control(top());
3693       return always_branch;
3694     }
3695   }
3696   unsigned int value = 0;
3697   BoolTest::mask btest = BoolTest::illegal;
3698   switch(kind) {
3699     case ObjectArray:
3700     case NonObjectArray: {
3701       value = Klass::_lh_array_tag_obj_value;
3702       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
3703       btest = (kind == ObjectArray) ? BoolTest::eq : BoolTest::ne;
3704       break;
3705     }
3706     case TypeArray: {
3707       value = Klass::_lh_array_tag_type_value;
3708       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
3709       btest = BoolTest::eq;
3710       break;
3711     }
3712     case AnyArray:    value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
3713     case NonArray:    value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
3714     default:
3715       ShouldNotReachHere();
3716   }
3717   // Now test the correct condition.
3718   jint nval = (jint)value;



3719   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));



3720   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
3721   return generate_fair_guard(bol, region);
3722 }
3723 
3724 
3725 //-----------------------inline_native_newArray--------------------------
3726 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
3727 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
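// Note the argument slots: allocateUninitializedArray0 is an instance method, so the Unsafe
// receiver occupies argument(0) and (mirror, length) start at argument(1), while the static
// Array.newArray takes them at argument(0) and argument(1).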
3728 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
3729   Node* mirror;
3730   Node* count_val;
3731   if (uninitialized) {
3732     mirror    = argument(1);
3733     count_val = argument(2);
3734   } else {
3735     mirror    = argument(0);
3736     count_val = argument(1);
3737   }
3738 
3739   mirror = null_check(mirror);
3740   // If mirror is dead, only the null path is taken.
3741   if (stopped())  return true;
3742 
3743   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3744   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3745   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
3746   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);

3851   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3852   { PreserveReexecuteState preexecs(this);
3853     jvms()->set_should_reexecute(true);
3854 
3855     array_type_mirror = null_check(array_type_mirror);
3856     original          = null_check(original);
3857 
3858     // Check if a null path was taken unconditionally.
3859     if (stopped())  return true;
3860 
3861     Node* orig_length = load_array_length(original);
3862 
3863     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3864     klass_node = null_check(klass_node);
3865 
3866     RegionNode* bailout = new RegionNode(1);
3867     record_for_igvn(bailout);
3868 
3869     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3870     // Bail out if that is so.
3871     // An inline type array may have object fields that would require a
3872     // write barrier. Conservatively, go to the slow path.
3873     // TODO 8251971: Optimize for the case when flat src/dst are later found
3874     // to not contain oops (i.e., move this check to the macro expansion phase).
3875     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
3876     const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
3877     ciKlass* klass = _gvn.type(klass_node)->is_klassptr()->klass();
3878     bool exclude_flat = UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
3879                         // Can src array be flat and contain oops?
3880                         (orig_t == NULL || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
3881                         // Can dest array be flat and contain oops?
3882                         klass->can_be_inline_array_klass() && (!klass->is_flat_array_klass() || klass->as_flat_array_klass()->element_klass()->as_inline_klass()->contains_oops());
3883     Node* not_objArray = exclude_flat ? generate_non_objArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
3884     if (not_objArray != NULL) {
3885       // Improve the klass node's type from the new optimistic assumption:
3886       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3887       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
3888       Node* cast = new CastPPNode(klass_node, akls);
3889       cast->init_req(0, control());
3890       klass_node = _gvn.transform(cast);
3891     }
3892 
3893     Node* original_kls = load_object_klass(original);
3894     // ArrayCopyNode::Ideal may transform the ArrayCopyNode to
3895     // loads/stores but it is legal only if we're sure the
3896     // Arrays.copyOf would succeed. So we need all input arguments
3897     // to the copyOf to be validated, including that the copy to the
3898     // new array won't trigger an ArrayStoreException. That subtype
3899     // check can be optimized if we know something about the type of
3900     // the input array from type speculation.
3901     if (_gvn.type(klass_node)->singleton() && !stopped()) {
3902       ciKlass* subk   = _gvn.type(original_kls)->is_klassptr()->klass();
3903       ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
3904 
3905       int test = C->static_subtype_check(superk, subk);
3906       if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
3907         const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
3908         if (t_original->speculative_type() != NULL) {
3909           original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
3910           original_kls = load_object_klass(original);
3911         }
3912       }
3913     }
3914 
3915     // Bail out if either start or end is negative.
3916     generate_negative_guard(start, bailout, &start);
3917     generate_negative_guard(end,   bailout, &end);
3918 
3919     Node* length = end;
3920     if (_gvn.type(start) != TypeInt::ZERO) {
3921       length = _gvn.transform(new SubINode(end, start));
3922     }
3923 
3924     // Bail out if length is negative.
3925     // Without this check, new_array would throw
3926     // NegativeArraySizeException, but IllegalArgumentException is what
3927     // should be thrown.
3928     generate_negative_guard(length, bailout, &length);
3929 
3930     // Handle inline type arrays
3931     bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
3932     if (!stopped()) {
3933       orig_t = _gvn.type(original)->isa_aryptr();
3934       if (orig_t != NULL && orig_t->is_flat()) {
3935         // Src is flat, check that dest is flat as well
3936         if (exclude_flat) {
3937           // Dest can't be flat, bail out
3938           bailout->add_req(control());
3939           set_control(top());
3940         } else {
3941           generate_fair_guard(flat_array_test(klass_node, /* flat = */ false), bailout);
3942         }
3943       } else if (UseFlatArray && (orig_t == NULL || !orig_t->is_not_flat()) &&
3944                  // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
3945                  ((!klass->is_flat_array_klass() && klass->can_be_inline_array_klass()) || !can_validate)) {
3946         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
3947         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
3948         generate_fair_guard(flat_array_test(original_kls), bailout);
3949         if (orig_t != NULL) {
3950           orig_t = orig_t->cast_to_not_flat();
3951           original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
3952         }
3953       }
3954       if (!can_validate) {
3955         // No validation. The subtype check emitted at macro expansion time will not go to the slow
3956         // path but will call checkcast_arraycopy, which cannot handle flat/null-free inline type arrays.
3957         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
3958         generate_fair_guard(null_free_array_test(klass_node), bailout);
3959       }
3960     }
3961 
3962     if (bailout->req() > 1) {
3963       PreserveJVMState pjvms(this);
3964       set_control(_gvn.transform(bailout));
3965       uncommon_trap(Deoptimization::Reason_intrinsic,
3966                     Deoptimization::Action_maybe_recompile);
3967     }
3968 
3969     if (!stopped()) {
3970       // How many elements will we copy from the original?
3971       // The answer is MinI(orig_length - start, length).
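      // E.g. (illustrative) Arrays.copyOf(src, 10) with src.length == 4 and start == 0 copies
      // MinI(4, 10) == 4 elements; the remaining slots keep the default value from new_array.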
3972       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
3973       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
3974 
3975       // Generate a direct call to the right arraycopy function(s).
3976       // We know the copy is disjoint but we might not know if the
3977       // oop stores need checking.
3978       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
3979       // This will fail a store-check if x contains any non-nulls.
3980 




















3981       bool validated = false;
3982       // Reason_class_check rather than Reason_intrinsic because we
3983       // want to intrinsify even if this traps.
3984       if (can_validate) {
3985         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
3986 
3987         if (not_subtype_ctrl != top()) {
3988           PreserveJVMState pjvms(this);
3989           set_control(not_subtype_ctrl);
3990           uncommon_trap(Deoptimization::Reason_class_check,
3991                         Deoptimization::Action_make_not_entrant);
3992           assert(stopped(), "Should be stopped");
3993         }
3994         validated = true;
3995       }
3996 
3997       if (!stopped()) {
3998         newcopy = new_array(klass_node, length, 0);  // no arguments to push
3999 
4000         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
4001                                                 original_kls, klass_node);
4002         if (!is_copyOfRange) {
4003           ac->set_copyof(validated);
4004         } else {
4005           ac->set_copyofrange(validated);
4006         }
4007         Node* n = _gvn.transform(ac);
4008         if (n == ac) {
4009           ac->connect_outputs(this);
4010         } else {
4011           assert(validated, "shouldn't transform if all arguments not validated");
4012           set_all_memory(n);
4013         }
4014       }
4015     }
4016   } // original reexecute is set back here
4017 
4018   C->set_has_split_ifs(true); // Has chance for split-if optimization
4019   if (!stopped()) {
4020     set_result(newcopy);
4021   }

4103   set_edges_for_java_call(slow_call);
4104   return slow_call;
4105 }
4106 
4107 
4108 /**
4109  * Build special case code for calls to hashCode on an object. This call may
4110  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4111  * slightly different code.
4112  */
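// Fast-path sketch: load the mark word and bail to the slow path if the object is locked or is
// an inline type (masked header != unlocked_value); otherwise extract the hash bits. A hash of
// markWord::no_hash (not yet assigned) also takes the slow path, which calls into the runtime.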
4113 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4114   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4115   assert(!(is_virtual && is_static), "either virtual, special, or static");
4116 
4117   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4118 
4119   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4120   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4121   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4122   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4123   Node* obj = argument(0);
4124 
4125   if (obj->is_InlineType() || gvn().type(obj)->is_inlinetypeptr()) {
4126     return false;
4127   }
4128 
4129   if (!is_static) {
4130     // Check for hashing null object
4131     obj = null_check_receiver();
4132     if (stopped())  return true;        // unconditionally null
4133     result_reg->init_req(_null_path, top());
4134     result_val->init_req(_null_path, top());
4135   } else {
4136     // Do a null check, and return zero if null.
4137     // System.identityHashCode(null) == 0

4138     Node* null_ctl = top();
4139     obj = null_check_oop(obj, &null_ctl);
4140     result_reg->init_req(_null_path, null_ctl);
4141     result_val->init_req(_null_path, _gvn.intcon(0));
4142   }
4143 
4144   // Unconditionally null?  Then return right away.
4145   if (stopped()) {
4146     set_control( result_reg->in(_null_path));
4147     if (!stopped())
4148       set_result(result_val->in(_null_path));
4149     return true;
4150   }
4151 
4152   // We only go to the fast case code if we pass a number of guards.  The
4153   // paths which do not pass are accumulated in the slow_region.
4154   RegionNode* slow_region = new RegionNode(1);
4155   record_for_igvn(slow_region);
4156 
4157   // If this is a virtual call, we generate a funny guard.  We pull out
4158   // the vtable entry corresponding to hashCode() from the target object.
4159   // If the target method which we are calling happens to be the native
4160   // Object hashCode() method, we pass the guard.  We do not need this
4161   // guard for non-virtual calls -- the caller is known to be the native
4162   // Object hashCode().
4163   if (is_virtual) {
4164     // After null check, get the object's klass.
4165     Node* obj_klass = load_object_klass(obj);
4166     generate_virtual_guard(obj_klass, slow_region);
4167   }
4168 
4169   // Get the header out of the object, use LoadMarkNode when available
4170   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4171   // The control of the load must be NULL. Otherwise, the load can move before
4172   // the null check after castPP removal.
4173   Node* no_ctrl = NULL;
4174   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4175 
4176   // Test the header to see if it is unlocked.
4177   // This also serves as a guard against inline types.
4178   Node *lock_mask      = _gvn.MakeConX(markWord::inline_type_mask_in_place);
4179   Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4180   Node *unlocked_val   = _gvn.MakeConX(markWord::unlocked_value);
4181   Node *chk_unlocked   = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
4182   Node *test_unlocked  = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
4183 
4184   generate_slow_guard(test_unlocked, slow_region);
4185 
4186   // Get the hash value and check to see that it has been properly assigned.
4187   // We depend on hash_mask being at most 32 bits and avoid the use of
4188   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4189   // vm: see markWord.hpp.
4190   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
4191   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
4192   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4193   // This hack lets the hash bits live anywhere in the mark object now, as long
4194   // as the shift drops the relevant bits into the low 32 bits.  Note that
4195   // the Java spec says that hashCode is an int, so there's no point in capturing
4196   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4197   hshifted_header      = ConvX2I(hshifted_header);
4198   Node *hash_val       = _gvn.transform(new AndINode(hshifted_header, hash_mask));

4224     // this->control() comes from set_results_for_java_call
4225     result_reg->init_req(_slow_path, control());
4226     result_val->init_req(_slow_path, slow_result);
4227     result_io  ->set_req(_slow_path, i_o());
4228     result_mem ->set_req(_slow_path, reset_memory());
4229   }
4230 
4231   // Return the combined state.
4232   set_i_o(        _gvn.transform(result_io)  );
4233   set_all_memory( _gvn.transform(result_mem));
4234 
4235   set_result(result_reg, result_val);
4236   return true;
4237 }
4238 
4239 //---------------------------inline_native_getClass----------------------------
4240 // public final native Class<?> java.lang.Object.getClass();
4241 //
4242 // Build special case code for calls to getClass on an object.
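// For inline types the exact class is known statically, so the mirror below is a compile-time
// constant and at most a null check is emitted.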
4243 bool LibraryCallKit::inline_native_getClass() {
4244   Node* obj = argument(0);
4245   if (obj->is_InlineTypeBase()) {
4246     const Type* t = _gvn.type(obj);
4247     if (t->maybe_null()) {
4248       null_check(obj);
4249     }
4250     set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
4251     return true;
4252   }
4253   obj = null_check_receiver();
4254   if (stopped())  return true;
4255   set_result(load_mirror_from_klass(load_object_klass(obj)));
4256   return true;
4257 }
4258 
4259 //-----------------inline_native_Reflection_getCallerClass---------------------
4260 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4261 //
4262 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4263 //
4264 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4265 // in that it must skip particular security frames and checks for
4266 // caller sensitive methods.
4267 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4268 #ifndef PRODUCT
4269   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4270     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4271   }
4272 #endif
4273 

4571 //  not cloneable or finalizer => slow path to out-of-line Object.clone
4572 //
4573 // The general case has two steps, allocation and copying.
4574 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4575 //
4576 // Copying also has two cases, oop arrays and everything else.
4577 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4578 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4579 //
4580 // These steps fold up nicely if and when the cloned object's klass
4581 // can be sharply typed as an object array, a type array, or an instance.
4582 //
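// Sketch of the dispatch below: flat inline-type arrays that may need GC barriers and objects
// failing the cloneable/finalizer test take the slow out-of-line Object.clone path; oop arrays
// use arrayof_oop_arraycopy via an ArrayCopyNode clone; other arrays and instances are copied
// with copy_to_clone after a plain allocation.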
4583 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4584   PhiNode* result_val;
4585 
4586   // Set the reexecute bit for the interpreter to reexecute
4587   // the bytecode that invokes Object.clone if deoptimization happens.
4588   { PreserveReexecuteState preexecs(this);
4589     jvms()->set_should_reexecute(true);
4590 
4591     Node* obj = argument(0);
4592     if (obj->is_InlineType()) {
4593       return false;
4594     }
4595 
4596     obj = null_check_receiver();
4597     if (stopped())  return true;
4598 
4599     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4600 
4601     // If we are going to clone an instance, we need its exact type to
4602     // know the number and types of fields to convert the clone to
4603     // loads/stores. Maybe a speculative type can help us.
4604     if (!obj_type->klass_is_exact() &&
4605         obj_type->speculative_type() != NULL &&
4606         obj_type->speculative_type()->is_instance_klass() &&
4607         !obj_type->speculative_type()->is_inlinetype()) {
4608       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
4609       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
4610           !spec_ik->has_injected_fields()) {
4611         ciKlass* k = obj_type->klass();
4612         if (!k->is_instance_klass() ||
4613             k->as_instance_klass()->is_interface() ||
4614             k->as_instance_klass()->has_subklass()) {
4615           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
4616         }
4617       }
4618     }
4619 
4620     // Conservatively insert a memory barrier on all memory slices.
4621     // Do not let writes into the original float below the clone.
4622     insert_mem_bar(Op_MemBarCPUOrder);
4623 
4624     // paths into result_reg:
4625     enum {
4626       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
4627       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
4628       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
4629       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
4630       PATH_LIMIT
4631     };
4632     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4633     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4634     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
4635     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4636     record_for_igvn(result_reg);
4637 
4638     Node* obj_klass = load_object_klass(obj);
4639     // We only go to the fast case code if we pass a number of guards.
4640     // The paths which do not pass are accumulated in the slow_region.
4641     RegionNode* slow_region = new RegionNode(1);
4642     record_for_igvn(slow_region);
4643 
4644     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4645     if (array_ctl != NULL) {
4646       // It's an array.
4647       PreserveJVMState pjvms(this);
4648       set_control(array_ctl);



4649 
4650       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4651       const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
4652       if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
4653           obj_type->klass()->can_be_inline_array_klass() &&
4654           (ary_ptr == NULL || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
4655         // A flattened inline type array may have object fields that would require a
4656         // write barrier. Conservatively, go to the slow path.
4657         generate_fair_guard(flat_array_test(obj_klass), slow_region);













4658       }







4659 
4660       if (!stopped()) {
4661         Node* obj_length = load_array_length(obj);
4662         Node* obj_size  = NULL;
4663         Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true);
4664 
4665         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4666         if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
4667           // If it is an oop array, it requires very special treatment,
4668           // because gc barriers are required when accessing the array.
4669           Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4670           if (is_obja != NULL) {
4671             PreserveJVMState pjvms2(this);
4672             set_control(is_obja);
4673             // Generate a direct call to the right arraycopy function(s).
4674             // Clones are always tightly coupled.
4675             ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
4676             ac->set_clone_oop_array();
4677             Node* n = _gvn.transform(ac);
4678             assert(n == ac, "cannot disappear");
4679             ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
4680 
4681             result_reg->init_req(_objArray_path, control());
4682             result_val->init_req(_objArray_path, alloc_obj);
4683             result_i_o ->set_req(_objArray_path, i_o());
4684             result_mem ->set_req(_objArray_path, reset_memory());
4685           }
4686         }
4687         // Otherwise, there are no barriers to worry about.
4688         // (We can dispense with card marks if we know the allocation
4689         //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4690         //  causes the non-eden paths to take compensating steps to
4691         //  simulate a fresh allocation, so that no further
4692         //  card marks are required in compiled code to initialize
4693         //  the object.)
4694 
4695         if (!stopped()) {
4696           copy_to_clone(obj, alloc_obj, obj_size, true);
4697 
4698           // Present the results of the copy.
4699           result_reg->init_req(_array_path, control());
4700           result_val->init_req(_array_path, alloc_obj);
4701           result_i_o ->set_req(_array_path, i_o());
4702           result_mem ->set_req(_array_path, reset_memory());
4703         }
4704       }
4705     }
4706 




4707     if (!stopped()) {
4708       // It's an instance (we did array above).  Make the slow-path tests.
4709       // If this is a virtual call, we generate a funny guard.  We grab
4710       // the vtable entry corresponding to clone() from the target object.
4711       // If the target method which we are calling happens to be the
4712       // Object clone() method, we pass the guard.  We do not need this
4713       // guard for non-virtual calls; the caller is known to be the native
4714       // Object clone().
4715       if (is_virtual) {
4716         generate_virtual_guard(obj_klass, slow_region);
4717       }
4718 
4719       // The object must be easily cloneable and must not have a finalizer.
4720       // Both of these conditions may be checked in a single test.
4721       // We could optimize the test further, but we don't care.
4722       generate_access_flags_guard(obj_klass,
4723                                   // Test both conditions:
4724                                   JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
4725                                   // Must be cloneable but not finalizer:
4726                                   JVM_ACC_IS_CLONEABLE_FAST,

4847 // array in the heap that GCs wouldn't expect. Move the allocation
4848 // after the traps so we don't allocate the array if we
4849 // deoptimize. This is possible because tightly_coupled_allocation()
4850 // guarantees there's no observer of the allocated array at this point
4851 // and the control flow is simple enough.
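// Schematically: the allocation's Control/I_O/Memory inputs are rewired to the state after the
// guards, and the Initialize node's projections become the new current control and memory, so
// the array is only allocated once every guard has passed.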
4852 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms,
4853                                                     int saved_reexecute_sp, uint new_idx) {
4854   if (saved_jvms != NULL && !stopped()) {
4855     assert(alloc != NULL, "only with a tightly coupled allocation");
4856     // restore JVM state to the state at the arraycopy
4857     saved_jvms->map()->set_control(map()->control());
4858     assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
4859     assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
4860     // If we've improved the types of some nodes (null check) while
4861     // emitting the guards, propagate them to the current state
4862     map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
4863     set_jvms(saved_jvms);
4864     _reexecute_sp = saved_reexecute_sp;
4865 
4866     // Remove the allocation from above the guards
4867     CallProjections* callprojs = alloc->extract_projections(true);

4868     InitializeNode* init = alloc->initialization();
4869     Node* alloc_mem = alloc->in(TypeFunc::Memory);
4870     C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
4871     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
4872 
4873     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
4874     // the allocation (i.e. is only valid if the allocation succeeds):
4875     // 1) replace the CastIINode with AllocateArrayNode's length here
4876     // 2) create the CastIINode again at the end of this method, once the allocation has moved (see below)
4877     //
4878     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
4879     // a new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy).
4880     Node* init_control = init->proj_out(TypeFunc::Control);
4881     Node* alloc_length = alloc->Ideal_length();
4882 #ifdef ASSERT
4883     Node* prev_cast = NULL;
4884 #endif
4885     for (uint i = 0; i < init_control->outcnt(); i++) {
4886       Node* init_out = init_control->raw_out(i);
4887       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
4888 #ifdef ASSERT
4889         if (prev_cast == NULL) {
4890           prev_cast = init_out;

4892           if (prev_cast->cmp(*init_out) == false) {
4893             prev_cast->dump();
4894             init_out->dump();
4895             assert(false, "not equal CastIINode");
4896           }
4897         }
4898 #endif
4899         C->gvn_replace_by(init_out, alloc_length);
4900       }
4901     }
4902     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
4903 
4904     // move the allocation here (after the guards)
4905     _gvn.hash_delete(alloc);
4906     alloc->set_req(TypeFunc::Control, control());
4907     alloc->set_req(TypeFunc::I_O, i_o());
4908     Node *mem = reset_memory();
4909     set_all_memory(mem);
4910     alloc->set_req(TypeFunc::Memory, mem);
4911     set_control(init->proj_out_or_null(TypeFunc::Control));
4912     set_i_o(callprojs->fallthrough_ioproj);
4913 
4914     // Update memory as done in GraphKit::set_output_for_allocation()
4915     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
4916     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
4917     if (ary_type->isa_aryptr() && length_type != NULL) {
4918       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4919     }
4920     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
4921     int            elemidx  = C->get_alias_index(telemref);
4922     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
4923     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
4924 
4925     Node* allocx = _gvn.transform(alloc);
4926     assert(allocx == alloc, "where has the allocation gone?");
4927     assert(dest->is_CheckCastPP(), "not an allocation result?");
4928 
4929     _gvn.hash_delete(dest);
4930     dest->set_req(0, control());
4931     Node* destx = _gvn.transform(dest);
4932     assert(destx == dest, "where has the allocation result gone?");

5068       // Do we have the exact type of dest?
5069       bool could_have_dest = dest_spec;
5070       ciKlass* src_k = top_src->klass();
5071       ciKlass* dest_k = top_dest->klass();
5072       if (!src_spec) {
5073         src_k = src_type->speculative_type_not_null();
5074         if (src_k != NULL && src_k->is_array_klass()) {
5075           could_have_src = true;
5076         }
5077       }
5078       if (!dest_spec) {
5079         dest_k = dest_type->speculative_type_not_null();
5080         if (dest_k != NULL && dest_k->is_array_klass()) {
5081           could_have_dest = true;
5082         }
5083       }
5084       if (could_have_src && could_have_dest) {
5085         // If we can have both exact types, emit the missing guards
5086         if (could_have_src && !src_spec) {
5087           src = maybe_cast_profiled_obj(src, src_k, true);
5088           src_type = _gvn.type(src);
5089           top_src = src_type->isa_aryptr();
5090         }
5091         if (could_have_dest && !dest_spec) {
5092           dest = maybe_cast_profiled_obj(dest, dest_k, true);
5093           dest_type = _gvn.type(dest);
5094           top_dest = dest_type->isa_aryptr();
5095         }
5096       }
5097     }
5098   }
5099 
5100   ciMethod* trap_method = method();
5101   int trap_bci = bci();
5102   if (saved_jvms != NULL) {
5103     trap_method = alloc->jvms()->method();
5104     trap_bci = alloc->jvms()->bci();
5105   }
5106 
5107   bool negative_length_guard_generated = false;
5108 
5109   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5110       can_emit_guards && !src->is_top() && !dest->is_top()) {

5111     // validate arguments: enables transformation of the ArrayCopyNode
5112     validated = true;
5113 
5114     RegionNode* slow_region = new RegionNode(1);
5115     record_for_igvn(slow_region);
5116 
5117     // (1) src and dest are arrays.
5118     generate_non_array_guard(load_object_klass(src), slow_region);
5119     generate_non_array_guard(load_object_klass(dest), slow_region);
5120 
5121     // (2) src and dest arrays must have elements of the same BasicType
5122     // done at macro expansion or at Ideal transformation time
5123 
5124     // (4) src_offset must not be negative.
5125     generate_negative_guard(src_offset, slow_region);
5126 
5127     // (5) dest_offset must not be negative.
5128     generate_negative_guard(dest_offset, slow_region);
5129 
5130     // (7) src_offset + length must not exceed length of src.

5133                          slow_region);
5134 
5135     // (8) dest_offset + length must not exceed length of dest.
5136     generate_limit_guard(dest_offset, length,
5137                          load_array_length(dest),
5138                          slow_region);
5139 
5140     // (6) length must not be negative.
5141     // This is also checked in generate_arraycopy() during macro expansion, but
5142     // we also have to check it here for the case where the ArrayCopyNode will
5143     // be eliminated by Escape Analysis.
5144     if (EliminateAllocations) {
5145       generate_negative_guard(length, slow_region);
5146       negative_length_guard_generated = true;
5147     }
5148 
5149     // (9) each element of an oop array must be assignable
5150     Node* dest_klass = load_object_klass(dest);
5151     if (src != dest) {
5152       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
5153       slow_region->add_req(not_subtype_ctrl);
5154     }
5155 
5156     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5157     const Type* toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
5158     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5159     src_type = _gvn.type(src);
5160     top_src  = src_type->isa_aryptr();
5161 
5162     // Handle flat inline type arrays (null-free arrays are handled by the subtype check above)
5163     if (!stopped() && UseFlatArray) {
5164       // If dest is flat, src must be flat as well (guaranteed by src <: dest check). Handle flat src here.
5165       assert(top_dest == NULL || !top_dest->is_flat() || top_src->is_flat(), "src array must be flat");
5166       if (top_src != NULL && top_src->is_flat()) {
5167         // Src is flat, check that dest is flat as well
5168         if (top_dest != NULL && !top_dest->is_flat()) {
5169           generate_fair_guard(flat_array_test(dest_klass, /* flat = */ false), slow_region);
5170           // Since dest is flat and src <: dest, dest must have the same type as src.
5171           top_dest = TypeOopPtr::make_from_klass(top_src->klass())->isa_aryptr();
5172           assert(top_dest->is_flat(), "dest must be flat");
5173           dest = _gvn.transform(new CheckCastPPNode(control(), dest, top_dest));
5174         }
5175       } else if (top_src == NULL || !top_src->is_not_flat()) {
5176         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
5177         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
5178         assert(top_dest == NULL || !top_dest->is_flat(), "dest array must not be flat");
5179         generate_fair_guard(flat_array_test(src), slow_region);
5180         if (top_src != NULL) {
5181           top_src = top_src->cast_to_not_flat();
5182           src = _gvn.transform(new CheckCastPPNode(control(), src, top_src));
5183         }
5184       }
5185     }
5186 
5187     {
5188       PreserveJVMState pjvms(this);
5189       set_control(_gvn.transform(slow_region));
5190       uncommon_trap(Deoptimization::Reason_intrinsic,
5191                     Deoptimization::Action_make_not_entrant);
5192       assert(stopped(), "Should be stopped");
5193     }




5194   }
5195 
5196   arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
5197 
5198   if (stopped()) {
5199     return true;
5200   }
5201 
5202   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL, negative_length_guard_generated,
5203                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
5204                                           // so the compiler has a chance to eliminate them: during macro expansion,
5205                                           // we have to set their control (CastPP nodes are eliminated).
5206                                           load_object_klass(src), load_object_klass(dest),
5207                                           load_array_length(src), load_array_length(dest));
5208 
5209   ac->set_arraycopy(validated);
5210 
5211   Node* n = _gvn.transform(ac);
5212   if (n == ac) {
5213     ac->connect_outputs(this);