src/hotspot/share/opto/library_call.cpp

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"

  27 #include "ci/ciUtilities.inline.hpp"
  28 #include "classfile/vmIntrinsics.hpp"
  29 #include "compiler/compileBroker.hpp"
  30 #include "compiler/compileLog.hpp"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "jfr/support/jfrIntrinsics.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/klass.inline.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/c2compiler.hpp"
  39 #include "opto/castnode.hpp"
  40 #include "opto/cfgnode.hpp"
  41 #include "opto/convertnode.hpp"
  42 #include "opto/countbitsnode.hpp"
  43 #include "opto/idealKit.hpp"
  44 #include "opto/library_call.hpp"
  45 #include "opto/mathexactnode.hpp"
  46 #include "opto/mulnode.hpp"

 303   case vmIntrinsics::_indexOfUL:                return inline_string_indexOf(StrIntrinsicNode::UL);
 304   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 305   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 306   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 307   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 308   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 309 
 310   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 311   case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);
 312 
 313   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 314   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 315   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 316   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 317 
 318   case vmIntrinsics::_compressStringC:
 319   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 320   case vmIntrinsics::_inflateStringC:
 321   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 322 


 323   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 324   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 325   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 326   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 327   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 328   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 329   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 330   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 331   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);

 332 
 333   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 334   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 335   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 336   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 337   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 338   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 339   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 340   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 341   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);

 342 
 343   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 344   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 345   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 346   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 347   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 348   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 349   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 350   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 351   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 352 
 353   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 354   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 355   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 356   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 357   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 358   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 359   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 360   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 361   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 490   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 491   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 492   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 493   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 494   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 495 
 496   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 497   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 498 
 499   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 500 
 501   case vmIntrinsics::_isInstance:
 502   case vmIntrinsics::_getModifiers:
 503   case vmIntrinsics::_isInterface:
 504   case vmIntrinsics::_isArray:
 505   case vmIntrinsics::_isPrimitive:
 506   case vmIntrinsics::_isHidden:
 507   case vmIntrinsics::_getSuperclass:
 508   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 509 
 510   case vmIntrinsics::_floatToRawIntBits:
 511   case vmIntrinsics::_floatToIntBits:
 512   case vmIntrinsics::_intBitsToFloat:
 513   case vmIntrinsics::_doubleToRawLongBits:
 514   case vmIntrinsics::_doubleToLongBits:
 515   case vmIntrinsics::_longBitsToDouble:         return inline_fp_conversions(intrinsic_id());
 516 
 517   case vmIntrinsics::_numberOfLeadingZeros_i:
 518   case vmIntrinsics::_numberOfLeadingZeros_l:
 519   case vmIntrinsics::_numberOfTrailingZeros_i:
 520   case vmIntrinsics::_numberOfTrailingZeros_l:
 521   case vmIntrinsics::_bitCount_i:
 522   case vmIntrinsics::_bitCount_l:
 523   case vmIntrinsics::_reverseBytes_i:
 524   case vmIntrinsics::_reverseBytes_l:
 525   case vmIntrinsics::_reverseBytes_s:
 526   case vmIntrinsics::_reverseBytes_c:           return inline_number_methods(intrinsic_id());
 527 
 528   case vmIntrinsics::_getCallerClass:           return inline_native_Reflection_getCallerClass();
 529 

2146   case vmIntrinsics::_numberOfLeadingZeros_l:   n = new CountLeadingZerosLNode( arg);  break;
2147   case vmIntrinsics::_numberOfTrailingZeros_i:  n = new CountTrailingZerosINode(arg);  break;
2148   case vmIntrinsics::_numberOfTrailingZeros_l:  n = new CountTrailingZerosLNode(arg);  break;
2149   case vmIntrinsics::_bitCount_i:               n = new PopCountINode(          arg);  break;
2150   case vmIntrinsics::_bitCount_l:               n = new PopCountLNode(          arg);  break;
2151   case vmIntrinsics::_reverseBytes_c:           n = new ReverseBytesUSNode(0,   arg);  break;
2152   case vmIntrinsics::_reverseBytes_s:           n = new ReverseBytesSNode( 0,   arg);  break;
2153   case vmIntrinsics::_reverseBytes_i:           n = new ReverseBytesINode( 0,   arg);  break;
2154   case vmIntrinsics::_reverseBytes_l:           n = new ReverseBytesLNode( 0,   arg);  break;
2155   default:  fatal_unexpected_iid(id);  break;
2156   }
2157   set_result(_gvn.transform(n));
2158   return true;
2159 }
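// Illustrative note (not authoritative): each bit-twiddling intrinsic above
// maps onto a single ideal node, so a Java call such as Integer.bitCount(x)
// becomes one PopCountINode(x) and Long.reverseBytes(x) becomes one
// ReverseBytesLNode(0, x); the matcher later turns these nodes into single
// machine instructions (e.g. POPCNT/BSWAP on x86) on platforms where the
// intrinsic is enabled.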
2160 
2161 //----------------------------inline_unsafe_access----------------------------
2162 
2163 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2164   // Attempt to infer a sharper value type from the offset and base type.
2165   ciKlass* sharpened_klass = NULL;

2166 
2167   // See if it is an instance field, with an object type.
2168   if (alias_type->field() != NULL) {
2169     if (alias_type->field()->type()->is_klass()) {
2170       sharpened_klass = alias_type->field()->type()->as_klass();

2171     }
2172   }
2173 
2174   // See if it is a narrow oop array.
2175   if (adr_type->isa_aryptr()) {
2176     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2177       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();

2178       if (elem_type != NULL) {
2179         sharpened_klass = elem_type->klass();
2180       }
2181     }
2182   }
2183 
2184   // The sharpened class might be unloaded if there is no class loader
2185   // constraint in place.
2186   if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2187     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2188 
2189 #ifndef PRODUCT
2190     if (C->print_intrinsics() || C->print_inlining()) {
2191       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2192       tty->print("  sharpened value: ");  tjp->dump();      tty->cr();
2193     }
2194 #endif
2195     // Sharpen the value type.
2196     return tjp;
2197   }
2198   return NULL;
2199 }
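// Illustrative example of the sharpening above (a sketch, not an exhaustive
// description): for an Unsafe.getReference load from a String[] (adr_type is
// an objArray pointer whose element type is String), the element-type lookup
// yields String, so the access is typed as String rather than plain Object,
// and downstream code can rely on the narrower oop type.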
2200 
2201 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2202   switch (kind) {
2203       case Relaxed:
2204         return MO_UNORDERED;
2205       case Opaque:
2206         return MO_RELAXED;
2207       case Acquire:

2223   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2224   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2225 
2226   if (is_reference_type(type)) {
2227     decorators |= ON_UNKNOWN_OOP_REF;
2228   }
2229 
2230   if (unaligned) {
2231     decorators |= C2_UNALIGNED;
2232   }
2233 
2234 #ifndef PRODUCT
2235   {
2236     ResourceMark rm;
2237     // Check the signatures.
2238     ciSignature* sig = callee()->signature();
2239 #ifdef ASSERT
2240     if (!is_store) {
2241       // Object getReference(Object base, int/long offset), etc.
2242       BasicType rtype = sig->return_type()->basic_type();
2243       assert(rtype == type, "getter must return the expected value");
2244       assert(sig->count() == 2, "oop getter has 2 arguments");
2245       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2246       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2247     } else {
2248       // void putReference(Object base, int/long offset, Object x), etc.
2249       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2250       assert(sig->count() == 3, "oop putter has 3 arguments");
2251       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2252       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2253       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2254       assert(vtype == type, "putter must accept the expected value");
2255     }
2256 #endif // ASSERT
2257  }
2258 #endif //PRODUCT
2259 
2260   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2261 
2262   Node* receiver = argument(0);  // type: oop
2263 
2264   // Build address expression.
2265   Node* heap_base_oop = top();
2266 
2267   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2268   Node* base = argument(1);  // type: oop
2269   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2270   Node* offset = argument(2);  // type: long
2271   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2272   // to be plain byte offsets, which are also the same as those accepted
2273   // by oopDesc::field_addr.
2274   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2275          "fieldOffset must be byte-scaled");
2276   // 32-bit machines ignore the high half!
2277   offset = ConvL2X(offset);
2278 
2279   // Save state and restore on bailout
2280   uint old_sp = sp();
2281   SafePointNode* old_map = clone_map();
2282 
2283   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2284 
2285   if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2286     if (type != T_OBJECT) {
2287       decorators |= IN_NATIVE; // off-heap primitive access
2288     } else {
2289       set_map(old_map);
2290       set_sp(old_sp);
2291       return false; // off-heap oop accesses are not supported
2292     }
2293   } else {
2294     heap_base_oop = base; // on-heap or mixed access
2295   }
2296 
2297   // Can base be NULL? Otherwise, always on-heap access.
2298   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2299 
2300   if (!can_access_non_heap) {
2301     decorators |= IN_HEAP;
2302   }
2303 
2304   Node* val = is_store ? argument(4) : NULL;
2305 
2306   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2307   if (adr_type == TypePtr::NULL_PTR) {
2308     set_map(old_map);
2309     set_sp(old_sp);
2310     return false; // off-heap access with zero address
2311   }
2312 
2313   // Try to categorize the address.
2314   Compile::AliasType* alias_type = C->alias_type(adr_type);
2315   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2316 
2317   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2318       alias_type->adr_type() == TypeAryPtr::RANGE) {
2319     set_map(old_map);
2320     set_sp(old_sp);
2321     return false; // not supported
2322   }
2323 
2324   bool mismatched = false;
2325   BasicType bt = alias_type->basic_type();
2326   if (bt != T_ILLEGAL) {
2327     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2328     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2329       // Alias type doesn't differentiate between byte[] and boolean[].
2330       // Use address type to get the element type.
2331       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2332     }
2333     if (bt == T_ARRAY || bt == T_NARROWOOP) {
2334       // accessing an array field with getReference is not a mismatch
2335       bt = T_OBJECT;
2336     }
2337     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2338       // Don't intrinsify mismatched object accesses
2339       set_map(old_map);
2340       set_sp(old_sp);
2341       return false;
2342     }
2343     mismatched = (bt != type);
2344   } else if (alias_type->adr_type()->isa_oopptr()) {
2345     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2346   }
2347 
2348   old_map->destruct(&_gvn);
2349   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2350 
2351   if (mismatched) {
2352     decorators |= C2_MISMATCHED;
2353   }
2354 
2355   // First guess at the value type.
2356   const Type *value_type = Type::get_const_basic_type(type);
2357 
2358   // Figure out the memory ordering.
2359   decorators |= mo_decorator_for_access_kind(kind);
2360 
2361   if (!is_store && type == T_OBJECT) {
2362     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2363     if (tjp != NULL) {
2364       value_type = tjp;
2365     }
2366   }
2367 
2368   receiver = null_check(receiver);
2369   if (stopped()) {
2370     return true;
2371   }
2372   // Heap pointers get a null-check from the interpreter,
2373   // as a courtesy.  However, this is not guaranteed by Unsafe,
2374   // and it is not possible to fully distinguish unintended nulls
2375   // from intended ones in this API.
2376 
2377   if (!is_store) {
2378     Node* p = NULL;
2379     // Try to constant fold a load from a constant field
2380     ciField* field = alias_type->field();
2381     if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2382       // final or stable field
2383       p = make_constant_from_field(field, heap_base_oop);
2384     }
2385 
2386     if (p == NULL) { // Could not constant fold the load
2387       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2388       // Normalize the value returned by getBoolean in the following cases
2389       if (type == T_BOOLEAN &&
2390           (mismatched ||
2391            heap_base_oop == top() ||                  // - heap_base_oop is NULL or
2392            (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL
 2393                                                       //   and the unsafe access is made at a large offset
2394                                                       //   (i.e., larger than the maximum offset necessary for any
2395                                                       //   field access)
2396             ) {
2397           IdealKit ideal = IdealKit(this);
2398 #define __ ideal.
2399           IdealVariable normalized_result(ideal);
2400           __ declarations_done();
2401           __ set(normalized_result, p);
2402           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2403           __ set(normalized_result, ideal.ConI(1));
2404           ideal.end_if();
2405           final_sync(ideal);
2406           p = __ value(normalized_result);
2407 #undef __
2408       }
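      // Sketch (illustrative only) of what the IdealKit fragment above builds:
      // a mismatched or possibly off-heap getBoolean may read an arbitrary
      // byte value, so the result is canonicalized as if by
      //   p = (p != 0) ? 1 : 0;
      // matching the Java-level contract that a boolean read is either 0 or 1.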
2409     }
2410     if (type == T_ADDRESS) {
2411       p = gvn().transform(new CastP2XNode(NULL, p));
2412       p = ConvX2UL(p);
2413     }
2414     // The load node has the control of the preceding MemBarCPUOrder.  All
2415     // following nodes will have the control of the MemBarCPUOrder inserted at
2416     // the end of this method.  So, pushing the load onto the stack at a later
2417     // point is fine.
2418     set_result(p);
2419   } else {
2420     if (bt == T_ADDRESS) {
2421       // Repackage the long as a pointer.
2422       val = ConvL2X(val);
2423       val = gvn().transform(new CastX2PNode(val));
2424     }
2425     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2426   }
2427 
2428   return true;
2429 }
2430 
2431 //----------------------------inline_unsafe_load_store----------------------------
2432 // This method serves a couple of different customers (depending on LoadStoreKind):
2433 //
2434 // LS_cmp_swap:
2435 //
2436 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2437 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2438 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2439 //
2440 // LS_cmp_swap_weak:
2441 //
2442 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2443 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2444 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2445 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2446 //
2447 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);

2616     }
2617     case LS_cmp_swap:
2618     case LS_cmp_swap_weak:
2619     case LS_get_add:
2620       break;
2621     default:
2622       ShouldNotReachHere();
2623   }
2624 
2625   // Null check receiver.
2626   receiver = null_check(receiver);
2627   if (stopped()) {
2628     return true;
2629   }
2630 
2631   int alias_idx = C->get_alias_index(adr_type);
2632 
2633   if (is_reference_type(type)) {
2634     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2635 
2636     // Transformation of a value which could be NULL pointer (CastPP #NULL)
2637     // could be delayed during Parse (for example, in adjust_map_after_if()).
2638     // Execute transformation here to avoid barrier generation in such case.
2639     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2640       newval = _gvn.makecon(TypePtr::NULL_PTR);
2641 
2642     if (oldval != NULL && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2643       // Refine the value to a null constant, when it is known to be null
2644       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2645     }
2646   }
2647 
2648   Node* result = NULL;
2649   switch (kind) {
2650     case LS_cmp_exchange: {
2651       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2652                                             oldval, newval, value_type, type, decorators);
2653       break;
2654     }
2655     case LS_cmp_swap_weak:

2774   Node* cls = null_check(argument(1));
2775   if (stopped())  return true;
2776 
2777   Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
2778   kls = null_check(kls);
2779   if (stopped())  return true;  // argument was like int.class
2780 
2781   Node* test = NULL;
2782   if (LibraryCallKit::klass_needs_init_guard(kls)) {
2783     // Note:  The argument might still be an illegal value like
2784     // Serializable.class or Object[].class.   The runtime will handle it.
2785     // But we must make an explicit check for initialization.
2786     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2787     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2788     // can generate code to load it as unsigned byte.
2789     Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
2790     Node* bits = intcon(InstanceKlass::fully_initialized);
2791     test = _gvn.transform(new SubINode(inst, bits));
2792     // The 'test' is non-zero if we need to take a slow path.
2793   }
2794 
2795   Node* obj = new_instance(kls, test);
2796   set_result(obj);
2797   return true;
2798 }
2799 
2800 //------------------------inline_native_time_funcs--------------
2801 // inline code for System.currentTimeMillis() and System.nanoTime()
2802 // these have the same type and signature
2803 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2804   const TypeFunc* tf = OptoRuntime::void_long_Type();
2805   const TypePtr* no_memory_effects = NULL;
2806   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2807   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2808 #ifdef ASSERT
2809   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2810   assert(value_top == top(), "second value must be top");
2811 #endif
2812   set_result(value);
2813   return true;
2814 }
2815 

2923   set_control(jobj_is_not_null);
2924   Node* res = access_load(jobj, TypeInstPtr::NOTNULL, T_OBJECT,
2925                           IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
2926   result_rgn->init_req(_normal_path, control());
2927   result_val->init_req(_normal_path, res);
2928 
2929   set_result(result_rgn, result_val);
2930 
2931   return true;
2932 }
2933 
2934 #endif // JFR_HAVE_INTRINSICS
2935 
2936 //------------------------inline_native_currentThread------------------
2937 bool LibraryCallKit::inline_native_currentThread() {
2938   Node* junk = NULL;
2939   set_result(generate_current_thread(junk));
2940   return true;
2941 }
2942 
2943 //---------------------------load_mirror_from_klass----------------------------
2944 // Given a klass oop, load its java mirror (a java.lang.Class oop).
2945 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
2946   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
2947   Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
2948   // mirror = ((OopHandle)mirror)->resolve();
2949   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
2950 }
2951 
2952 //-----------------------load_klass_from_mirror_common-------------------------
2953 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
2954 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
2955 // and branch to the given path on the region.
2956 // If never_see_null, take an uncommon trap on null, so we can optimistically
2957 // compile for the non-null case.
2958 // If the region is NULL, force never_see_null = true.
2959 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
2960                                                     bool never_see_null,
2961                                                     RegionNode* region,
2962                                                     int null_path,
2963                                                     int offset) {
2964   if (region == NULL)  never_see_null = true;
2965   Node* p = basic_plus_adr(mirror, offset);
2966   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
2967   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
2968   Node* null_ctl = top();
2969   kls = null_check_oop(kls, &null_ctl, never_see_null);
2970   if (region != NULL) {
2971     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

2974     assert(null_ctl == top(), "no loose ends");
2975   }
2976   return kls;
2977 }
2978 
2979 //--------------------(inline_native_Class_query helpers)---------------------
2980 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
2981 // Fall through if (mods & mask) == bits, take the guard otherwise.
2982 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
2983   // Branch around if the given klass has the given modifier bit set.
2984   // Like generate_guard, adds a new path onto the region.
2985   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
2986   Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
2987   Node* mask = intcon(modifier_mask);
2988   Node* bits = intcon(modifier_bits);
2989   Node* mbit = _gvn.transform(new AndINode(mods, mask));
2990   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
2991   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
2992   return generate_fair_guard(bol, region);
2993 }

2994 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
2995   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
2996 }
2997 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
2998   return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
2999 }
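// Usage note (illustrative): with modifier_bits == 0 these two helpers fall
// through when (access_flags & mask) == 0 and branch to the region otherwise,
// e.g. generate_interface_guard(kls, region) falls through for ordinary
// classes and takes the guard path only when kls is an interface, per the
// "Fall through if (mods & mask) == bits" rule documented above.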
3000 
3001 //-------------------------inline_native_Class_query-------------------
3002 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3003   const Type* return_type = TypeInt::BOOL;
3004   Node* prim_return_value = top();  // what happens if it's a primitive class?
3005   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3006   bool expect_prim = false;     // most of these guys expect to work on refs
3007 
3008   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3009 
3010   Node* mirror = argument(0);
3011   Node* obj    = top();
3012 
3013   switch (id) {

3167 
3168   case vmIntrinsics::_getClassAccessFlags:
3169     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3170     query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3171     break;
3172 
3173   default:
3174     fatal_unexpected_iid(id);
3175     break;
3176   }
3177 
3178   // Fall-through is the normal case of a query to a real class.
3179   phi->init_req(1, query_value);
3180   region->init_req(1, control());
3181 
3182   C->set_has_split_ifs(true); // Has chance for split-if optimization
3183   set_result(region, phi);
3184   return true;
3185 }
3186 
3187 //-------------------------inline_Class_cast-------------------
3188 bool LibraryCallKit::inline_Class_cast() {
3189   Node* mirror = argument(0); // Class
3190   Node* obj    = argument(1);
3191   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3192   if (mirror_con == NULL) {
3193     return false;  // dead path (mirror->is_top()).
3194   }
3195   if (obj == NULL || obj->is_top()) {
3196     return false;  // dead path
3197   }
3198   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();

3199 
3200   // First, see if Class.cast() can be folded statically.
3201   // java_mirror_type() returns non-null for compile-time Class constants.
3202   ciType* tm = mirror_con->java_mirror_type();
3203   if (tm != NULL && tm->is_klass() &&
3204       tp != NULL && tp->klass() != NULL) {
3205     if (!tp->klass()->is_loaded()) {


3206       // Don't use intrinsic when class is not loaded.
3207       return false;
3208     } else {
3209       int static_res = C->static_subtype_check(tm->as_klass(), tp->klass());
3210       if (static_res == Compile::SSC_always_true) {
3211         // isInstance() is true - fold the code.
3212         set_result(obj);
3213         return true;
3214       } else if (static_res == Compile::SSC_always_false) {
3215         // Don't use intrinsic, have to throw ClassCastException.
3216         // If the reference is null, the non-intrinsic bytecode will
3217         // be optimized appropriately.
3218         return false;
3219       }
3220     }
3221   }
3222 
3223   // Bailout intrinsic and do normal inlining if exception path is frequent.
3224   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3225     return false;
3226   }
3227 
3228   // Generate dynamic checks.
3229   // Class.cast() is java implementation of _checkcast bytecode.
3230   // Do checkcast (Parse::do_checkcast()) optimizations here.
3231 
3232   mirror = null_check(mirror);
3233   // If mirror is dead, only null-path is taken.
3234   if (stopped()) {
3235     return true;
3236   }
3237 
3238   // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
3239   enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
3240   RegionNode* region = new RegionNode(PATH_LIMIT);
3241   record_for_igvn(region);
3242 
3243   // Now load the mirror's klass metaobject, and null-check it.
3244   // If kls is null, we have a primitive mirror and
3245   // nothing is an instance of a primitive type.
3246   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
3247 
3248   Node* res = top();
3249   if (!stopped()) {
3250     Node* bad_type_ctrl = top();
3251     // Do checkcast optimizations.
3252     res = gen_checkcast(obj, kls, &bad_type_ctrl);
3253     region->init_req(_bad_type_path, bad_type_ctrl);
3254   }
3255   if (region->in(_prim_path) != top() ||
3256       region->in(_bad_type_path) != top()) {

3257     // Let Interpreter throw ClassCastException.
3258     PreserveJVMState pjvms(this);
3259     set_control(_gvn.transform(region));
3260     uncommon_trap(Deoptimization::Reason_intrinsic,
3261                   Deoptimization::Action_maybe_recompile);
3262   }
3263   if (!stopped()) {
3264     set_result(res);
3265   }
3266   return true;
3267 }
3268 
3269 
3270 //--------------------------inline_native_subtype_check------------------------
3271 // This intrinsic takes the JNI calls out of the heart of
3272 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3273 bool LibraryCallKit::inline_native_subtype_check() {
3274   // Pull both arguments off the stack.
3275   Node* args[2];                // two java.lang.Class mirrors: superc, subc
3276   args[0] = argument(0);
3277   args[1] = argument(1);
3278   Node* klasses[2];             // corresponding Klasses: superk, subk
3279   klasses[0] = klasses[1] = top();
3280 
3281   enum {
3282     // A full decision tree on {superc is prim, subc is prim}:
3283     _prim_0_path = 1,           // {P,N} => false
3284                                 // {P,P} & superc!=subc => false
3285     _prim_same_path,            // {P,P} & superc==subc => true
3286     _prim_1_path,               // {N,P} => false
3287     _ref_subtype_path,          // {N,N} & subtype check wins => true
3288     _both_ref_path,             // {N,N} & subtype check loses => false
3289     PATH_LIMIT
3290   };
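  // Worked examples for the decision tree above (illustrative only):
  //   Object.class.isAssignableFrom(String.class) -> {N,N}, subtype check wins
  //                                               -> _ref_subtype_path -> true
  //   String.class.isAssignableFrom(Object.class) -> {N,N}, subtype check loses
  //                                               -> _both_ref_path    -> false
  //   int.class.isAssignableFrom(String.class)    -> {P,N} -> _prim_0_path -> false
  //   int.class.isAssignableFrom(int.class)       -> {P,P}, superc == subc
  //                                               -> _prim_same_path   -> true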
3291 
3292   RegionNode* region = new RegionNode(PATH_LIMIT);

3293   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
3294   record_for_igvn(region);

3295 
3296   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
3297   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3298   int class_klass_offset = java_lang_Class::klass_offset();
3299 
3300   // First null-check both mirrors and load each mirror's klass metaobject.
3301   int which_arg;
3302   for (which_arg = 0; which_arg <= 1; which_arg++) {
3303     Node* arg = args[which_arg];
3304     arg = null_check(arg);
3305     if (stopped())  break;
3306     args[which_arg] = arg;
3307 
3308     Node* p = basic_plus_adr(arg, class_klass_offset);
3309     Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
3310     klasses[which_arg] = _gvn.transform(kls);
3311   }
3312 
3313   // Having loaded both klasses, test each for null.
3314   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3315   for (which_arg = 0; which_arg <= 1; which_arg++) {
3316     Node* kls = klasses[which_arg];
3317     Node* null_ctl = top();
3318     kls = null_check_oop(kls, &null_ctl, never_see_null);
3319     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
3320     region->init_req(prim_path, null_ctl);
3321     if (stopped())  break;
3322     klasses[which_arg] = kls;
3323   }
3324 
3325   if (!stopped()) {
3326     // now we have two reference types, in klasses[0..1]
3327     Node* subk   = klasses[1];  // the argument to isAssignableFrom
3328     Node* superk = klasses[0];  // the receiver
3329     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3330     // now we have a successful reference subtype check
3331     region->set_req(_ref_subtype_path, control());
3332   }
3333 
3334   // If both operands are primitive (both klasses null), then
3335   // we must return true when they are identical primitives.
3336   // It is convenient to test this after the first null klass check.
3337   set_control(region->in(_prim_0_path)); // go back to first null check

3338   if (!stopped()) {
3339     // Since superc is primitive, make a guard for the superc==subc case.
3340     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
3341     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
3342     generate_guard(bol_eq, region, PROB_FAIR);
3343     if (region->req() == PATH_LIMIT+1) {
3344       // A guard was added.  If the added guard is taken, superc==subc.
3345       region->swap_edges(PATH_LIMIT, _prim_same_path);
3346       region->del_req(PATH_LIMIT);
3347     }
3348     region->set_req(_prim_0_path, control()); // Not equal after all.
3349   }
3350 
3351   // these are the only paths that produce 'true':
3352   phi->set_req(_prim_same_path,   intcon(1));
3353   phi->set_req(_ref_subtype_path, intcon(1));
3354 
3355   // pull together the cases:
3356   assert(region->req() == PATH_LIMIT, "sane region");
3357   for (uint i = 1; i < region->req(); i++) {
3358     Node* ctl = region->in(i);
3359     if (ctl == NULL || ctl == top()) {
3360       region->set_req(i, top());
3361       phi   ->set_req(i, top());
3362     } else if (phi->in(i) == NULL) {
3363       phi->set_req(i, intcon(0)); // all other paths produce 'false'
3364     }
3365   }
3366 
3367   set_control(_gvn.transform(region));
3368   set_result(_gvn.transform(phi));
3369   return true;
3370 }
3371 
3372 //---------------------generate_array_guard_common------------------------
3373 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
3374                                                   bool obj_array, bool not_array) {
3375 
3376   if (stopped()) {
3377     return NULL;
3378   }
3379 
3380   // If obj_array/not_array==false/false:
3381   // Branch around if the given klass is in fact an array (either obj or prim).
3382   // If obj_array/not_array==false/true:
3383   // Branch around if the given klass is not an array klass of any kind.
3384   // If obj_array/not_array==true/true:
3385   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
3386   // If obj_array/not_array==true/false:
3387   // Branch around if the kls is an oop array (Object[] or subtype)
3388   //
3389   // Like generate_guard, adds a new path onto the region.
3390   jint  layout_con = 0;
3391   Node* layout_val = get_layout_helper(kls, layout_con);
3392   if (layout_val == NULL) {
3393     bool query = (obj_array
3394                   ? Klass::layout_helper_is_objArray(layout_con)
3395                   : Klass::layout_helper_is_array(layout_con));
3396     if (query == not_array) {
3397       return NULL;                       // never a branch
3398     } else {                             // always a branch
3399       Node* always_branch = control();
3400       if (region != NULL)
3401         region->add_req(always_branch);
3402       set_control(top());
3403       return always_branch;
3404     }
3405   }
3406   // Now test the correct condition.
3407   jint  nval = (obj_array
3408                 ? (jint)(Klass::_lh_array_tag_type_value
3409                    <<    Klass::_lh_array_tag_shift)
3410                 : Klass::_lh_neutral_value);
3411   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
3412   BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
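  // Rationale (illustrative): instance klasses store a positive layout helper
  // (their size in bytes) while array klasses store a negative one with the
  // array tag in the sign bits, so a signed "lt" compare against the neutral
  // value separates arrays from non-arrays; for the objArray case the compare
  // is against the type-array tag pattern, below which only objArray tags fall.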
3413   // invert the test if we are looking for a non-array
3414   if (not_array)  btest = BoolTest(btest).negate();
3415   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
3416   return generate_fair_guard(bol, region);
3417 }
3418 
3419 
3420 //-----------------------inline_native_newArray--------------------------
3421 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
3422 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
3423 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
3424   Node* mirror;
3425   Node* count_val;
3426   if (uninitialized) {
3427     mirror    = argument(1);
3428     count_val = argument(2);
3429   } else {
3430     mirror    = argument(0);
3431     count_val = argument(1);
3432   }
3433 
3434   mirror = null_check(mirror);
3435   // If mirror is dead, only null-path is taken.
3436   if (stopped())  return true;
3437 
3438   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3439   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3440   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
3441   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);

3546   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3547   { PreserveReexecuteState preexecs(this);
3548     jvms()->set_should_reexecute(true);
3549 
3550     array_type_mirror = null_check(array_type_mirror);
3551     original          = null_check(original);
3552 
3553     // Check if a null path was taken unconditionally.
3554     if (stopped())  return true;
3555 
3556     Node* orig_length = load_array_length(original);
3557 
3558     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3559     klass_node = null_check(klass_node);
3560 
3561     RegionNode* bailout = new RegionNode(1);
3562     record_for_igvn(bailout);
3563 
3564     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3565     // Bail out if that is so.
3566     Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
3567     if (not_objArray != NULL) {
3568       // Improve the klass node's type from the new optimistic assumption:
3569       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3570       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
3571       Node* cast = new CastPPNode(klass_node, akls);
3572       cast->init_req(0, control());
3573       klass_node = _gvn.transform(cast);
3574     }
3575 
3576     // Bail out if either start or end is negative.
3577     generate_negative_guard(start, bailout, &start);
3578     generate_negative_guard(end,   bailout, &end);
3579 
3580     Node* length = end;
3581     if (_gvn.type(start) != TypeInt::ZERO) {
3582       length = _gvn.transform(new SubINode(end, start));
3583     }
3584 
3585     // Bail out if length is negative.
3586     // Without this, new_array would throw
3587     // NegativeArraySizeException, but IllegalArgumentException is what
3588     // should be thrown.
3589     generate_negative_guard(length, bailout, &length);
3590 
3591     if (bailout->req() > 1) {
3592       PreserveJVMState pjvms(this);
3593       set_control(_gvn.transform(bailout));
3594       uncommon_trap(Deoptimization::Reason_intrinsic,
3595                     Deoptimization::Action_maybe_recompile);
3596     }
3597 
3598     if (!stopped()) {
3599       // How many elements will we copy from the original?
3600       // The answer is MinI(orig_length - start, length).
3601       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
3602       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
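      // Worked example (illustrative): Arrays.copyOfRange(orig, 7, 12) on an
      // array of length 10 gives start == 7, length == 5, orig_tail == 3, so
      // moved == MinI(3, 5) == 3 elements are copied from the original and the
      // remaining 2 slots of the new array keep their default values.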
3603 
3604       // Generate a direct call to the right arraycopy function(s).
3605       // We know the copy is disjoint but we might not know if the
3606       // oop stores need checking.
3607       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
3608       // This will fail a store-check if x contains any non-nulls.
3609 
3610       // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
3611       // loads/stores but it is legal only if we're sure the
3612       // Arrays.copyOf would succeed. So we need all input arguments
3613       // to the copyOf to be validated, including that the copy to the
3614       // new array won't trigger an ArrayStoreException. That subtype
3615       // check can be optimized if we know something on the type of
3616       // the input array from type speculation.
3617       if (_gvn.type(klass_node)->singleton()) {
3618         ciKlass* subk   = _gvn.type(load_object_klass(original))->is_klassptr()->klass();
3619         ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
3620 
3621         int test = C->static_subtype_check(superk, subk);
3622         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
3623           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
3624           if (t_original->speculative_type() != NULL) {
3625             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
3626           }
3627         }
3628       }
3629 
3630       bool validated = false;
3631       // Reason_class_check rather than Reason_intrinsic because we
3632       // want to intrinsify even if this traps.
3633       if (!too_many_traps(Deoptimization::Reason_class_check)) {
3634         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
3635 
3636         if (not_subtype_ctrl != top()) {
3637           PreserveJVMState pjvms(this);
3638           set_control(not_subtype_ctrl);
3639           uncommon_trap(Deoptimization::Reason_class_check,
3640                         Deoptimization::Action_make_not_entrant);
3641           assert(stopped(), "Should be stopped");
3642         }
3643         validated = true;
3644       }
3645 
3646       if (!stopped()) {
3647         newcopy = new_array(klass_node, length, 0);  // no arguments to push
3648 
3649         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
3650                                                 load_object_klass(original), klass_node);
3651         if (!is_copyOfRange) {
3652           ac->set_copyof(validated);
3653         } else {
3654           ac->set_copyofrange(validated);
3655         }
3656         Node* n = _gvn.transform(ac);
3657         if (n == ac) {
3658           ac->connect_outputs(this);
3659         } else {
3660           assert(validated, "shouldn't transform if all arguments not validated");
3661           set_all_memory(n);
3662         }
3663       }
3664     }
3665   } // original reexecute is set back here
3666 
3667   C->set_has_split_ifs(true); // Has chance for split-if optimization
3668   if (!stopped()) {
3669     set_result(newcopy);
3670   }

3752   set_edges_for_java_call(slow_call);
3753   return slow_call;
3754 }
3755 
3756 
3757 /**
3758  * Build special case code for calls to hashCode on an object. This call may
3759  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
3760  * slightly different code.
3761  */
3762 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
3763   assert(is_static == callee()->is_static(), "correct intrinsic selection");
3764   assert(!(is_virtual && is_static), "either virtual, special, or static");
3765 
3766   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
3767 
3768   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3769   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
3770   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
3771   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
3772   Node* obj = NULL;
3773   if (!is_static) {
3774     // Check for hashing null object
3775     obj = null_check_receiver();
3776     if (stopped())  return true;        // unconditionally null
3777     result_reg->init_req(_null_path, top());
3778     result_val->init_req(_null_path, top());
3779   } else {
3780     // Do a null check, and return zero if null.
3781     // System.identityHashCode(null) == 0
3782     obj = argument(0);
3783     Node* null_ctl = top();
3784     obj = null_check_oop(obj, &null_ctl);
3785     result_reg->init_req(_null_path, null_ctl);
3786     result_val->init_req(_null_path, _gvn.intcon(0));
3787   }
3788 
3789   // Unconditionally null?  Then return right away.
3790   if (stopped()) {
3791     set_control( result_reg->in(_null_path));
3792     if (!stopped())
3793       set_result(result_val->in(_null_path));
3794     return true;
3795   }
3796 
3797   // We only go to the fast case code if we pass a number of guards.  The
3798   // paths which do not pass are accumulated in the slow_region.
3799   RegionNode* slow_region = new RegionNode(1);
3800   record_for_igvn(slow_region);
3801 
3802   // If this is a virtual call, we generate a funny guard.  We pull out
3803   // the vtable entry corresponding to hashCode() from the target object.
3804   // If the target method which we are calling happens to be the native
3805   // Object hashCode() method, we pass the guard.  We do not need this
3806   // guard for non-virtual calls -- the caller is known to be the native
3807   // Object hashCode().
3808   if (is_virtual) {
3809     // After null check, get the object's klass.
3810     Node* obj_klass = load_object_klass(obj);
3811     generate_virtual_guard(obj_klass, slow_region);
3812   }
3813 
3814   // Get the header out of the object, use LoadMarkNode when available
3815   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3816   // The control of the load must be NULL. Otherwise, the load can move before
3817   // the null check after castPP removal.
3818   Node* no_ctrl = NULL;
3819   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
3820 
3821   // Test the header to see if it is unlocked.
3822   Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);

3823   Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
3824   Node *unlocked_val   = _gvn.MakeConX(markWord::unlocked_value);
3825   Node *chk_unlocked   = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
3826   Node *test_unlocked  = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
3827 
3828   generate_slow_guard(test_unlocked, slow_region);
3829 
3830   // Get the hash value and check to see that it has been properly assigned.
3831   // We depend on hash_mask being at most 32 bits and avoid the use of
3832   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
3833   // vm: see markWord.hpp.
3834   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
3835   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
3836   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
3837   // This hack lets the hash bits live anywhere in the mark object now, as long
3838   // as the shift drops the relevant bits into the low 32 bits.  Note that
3839   // Java spec says that HashCode is an int so there's no point in capturing
3840   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
3841   hshifted_header      = ConvX2I(hshifted_header);
3842   Node *hash_val       = _gvn.transform(new AndINode(hshifted_header, hash_mask));
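  // In effect (illustrative only), the shift-and-mask above computes
  //   hash_val = (int)(header >> markWord::hash_shift) & markWord::hash_mask;
  // i.e. it extracts the identity hash from the unlocked mark word, truncated
  // to the 32 bits that Object.hashCode() actually returns.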

3868     // this->control() comes from set_results_for_java_call
3869     result_reg->init_req(_slow_path, control());
3870     result_val->init_req(_slow_path, slow_result);
3871     result_io  ->set_req(_slow_path, i_o());
3872     result_mem ->set_req(_slow_path, reset_memory());
3873   }
3874 
3875   // Return the combined state.
3876   set_i_o(        _gvn.transform(result_io)  );
3877   set_all_memory( _gvn.transform(result_mem));
3878 
3879   set_result(result_reg, result_val);
3880   return true;
3881 }
3882 
3883 //---------------------------inline_native_getClass----------------------------
3884 // public final native Class<?> java.lang.Object.getClass();
3885 //
3886 // Build special case code for calls to getClass on an object.
3887 bool LibraryCallKit::inline_native_getClass() {
3888   Node* obj = null_check_receiver();

3889   if (stopped())  return true;
3890   set_result(load_mirror_from_klass(load_object_klass(obj)));
3891   return true;
3892 }
3893 
3894 //-----------------inline_native_Reflection_getCallerClass---------------------
3895 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
3896 //
3897 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
3898 //
3899 // NOTE: This code must perform the same logic as JVM_GetCallerClass
3900 // in that it must skip particular security frames and checks for
3901 // caller sensitive methods.
3902 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
3903 #ifndef PRODUCT
3904   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
3905     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
3906   }
3907 #endif
3908 

4206 //  not cloneable or finalizer => slow path to out-of-line Object.clone
4207 //
4208 // The general case has two steps, allocation and copying.
4209 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4210 //
4211 // Copying also has two cases, oop arrays and everything else.
4212 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4213 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4214 //
4215 // These steps fold up nicely if and when the cloned object's klass
4216 // can be sharply typed as an object array, a type array, or an instance.
4217 //
4218 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4219   PhiNode* result_val;
4220 
4221   // Set the reexecute bit for the interpreter to reexecute
4222   // the bytecode that invokes Object.clone if deoptimization happens.
4223   { PreserveReexecuteState preexecs(this);
4224     jvms()->set_should_reexecute(true);
4225 
4226     Node* obj = null_check_receiver();
4227     if (stopped())  return true;
4228 
4229     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4230 
4231     // If we are going to clone an instance, we need its exact type to
4232     // know the number and types of fields to convert the clone to
4233     // loads/stores. Maybe a speculative type can help us.
4234     if (!obj_type->klass_is_exact() &&
4235         obj_type->speculative_type() != NULL &&
4236         obj_type->speculative_type()->is_instance_klass()) {

4237       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
4238       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
4239           !spec_ik->has_injected_fields()) {
4240         ciKlass* k = obj_type->klass();
4241         if (!k->is_instance_klass() ||
4242             k->as_instance_klass()->is_interface() ||
4243             k->as_instance_klass()->has_subklass()) {
4244           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
4245         }
4246       }
4247     }
4248 
4249     // Conservatively insert a memory barrier on all memory slices.
4250     // Do not let writes into the original float below the clone.
4251     insert_mem_bar(Op_MemBarCPUOrder);
4252 
4253     // paths into result_reg:
4254     enum {
4255       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
4256       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
4257       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
4258       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
4259       PATH_LIMIT
4260     };
4261     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4262     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4263     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
4264     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4265     record_for_igvn(result_reg);
4266 
4267     Node* obj_klass = load_object_klass(obj);





4268     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4269     if (array_ctl != NULL) {
4270       // It's an array.
4271       PreserveJVMState pjvms(this);
4272       set_control(array_ctl);
4273       Node* obj_length = load_array_length(obj);
4274       Node* obj_size  = NULL;
4275       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true);
4276 
4277       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4278       if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
4279         // If it is an oop array, it requires very special treatment,
4280         // because gc barriers are required when accessing the array.
4281         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4282         if (is_obja != NULL) {
4283           PreserveJVMState pjvms2(this);
4284           set_control(is_obja);
4285           // Generate a direct call to the right arraycopy function(s).
4286           // Clones are always tightly coupled.
4287           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
4288           ac->set_clone_oop_array();
4289           Node* n = _gvn.transform(ac);
4290           assert(n == ac, "cannot disappear");
4291           ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
4292 
4293           result_reg->init_req(_objArray_path, control());
4294           result_val->init_req(_objArray_path, alloc_obj);
4295           result_i_o ->set_req(_objArray_path, i_o());
4296           result_mem ->set_req(_objArray_path, reset_memory());
4297         }
4298       }
4299       // Otherwise, there are no barriers to worry about.
4300       // (We can dispense with card marks if we know the allocation
4301       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4302       //  causes the non-eden paths to take compensating steps to
4303       //  simulate a fresh allocation, so that no further
4304       //  card marks are required in compiled code to initialize
4305       //  the object.)
4306 
4307       if (!stopped()) {
4308         copy_to_clone(obj, alloc_obj, obj_size, true);
4309 
4310         // Present the results of the copy.
4311         result_reg->init_req(_array_path, control());
4312         result_val->init_req(_array_path, alloc_obj);
4313         result_i_o ->set_req(_array_path, i_o());
4314         result_mem ->set_req(_array_path, reset_memory());




































4315       }
4316     }
4317 
4318     // We only go to the instance fast case code if we pass a number of guards.
4319     // The paths which do not pass are accumulated in the slow_region.
4320     RegionNode* slow_region = new RegionNode(1);
4321     record_for_igvn(slow_region);
4322     if (!stopped()) {
4323       // It's an instance (we did array above).  Make the slow-path tests.
4324       // If this is a virtual call, we generate a funny guard.  We grab
4325       // the vtable entry corresponding to clone() from the target object.
4326       // If the target method which we are calling happens to be the
4327       // Object clone() method, we pass the guard.  We do not need this
4328       // guard for non-virtual calls; the caller is known to be the native
4329       // Object clone().
4330       if (is_virtual) {
4331         generate_virtual_guard(obj_klass, slow_region);
4332       }
4333 
4334       // The object must be easily cloneable and must not have a finalizer.
4335       // Both of these conditions may be checked in a single test.
4336       // We could optimize the test further, but we don't care.
4337       generate_access_flags_guard(obj_klass,
4338                                   // Test both conditions:
4339                                   JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
4340                                   // Must be cloneable but not finalizer:
4341                                   JVM_ACC_IS_CLONEABLE_FAST,

4462 // array in the heap that GCs wouldn't expect. Move the allocation
4463 // after the traps so we don't allocate the array if we
4464 // deoptimize. This is possible because tightly_coupled_allocation()
4465 // guarantees there's no observer of the allocated array at this point
4466 // and the control flow is simple enough.
4467 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms,
4468                                                     int saved_reexecute_sp, uint new_idx) {
4469   if (saved_jvms != NULL && !stopped()) {
4470     assert(alloc != NULL, "only with a tightly coupled allocation");
4471     // restore JVM state to the state at the arraycopy
4472     saved_jvms->map()->set_control(map()->control());
4473     assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
4474     assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
4475     // If we've improved the types of some nodes (null check) while
4476     // emitting the guards, propagate them to the current state
4477     map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
4478     set_jvms(saved_jvms);
4479     _reexecute_sp = saved_reexecute_sp;
4480 
4481     // Remove the allocation from above the guards
4482     CallProjections callprojs;
4483     alloc->extract_projections(&callprojs, true);
4484     InitializeNode* init = alloc->initialization();
4485     Node* alloc_mem = alloc->in(TypeFunc::Memory);
4486     C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
4487     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
4488 
4489     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
4490     // the allocation (i.e. is only valid if the allocation succeeds):
4491     // 1) replace CastIINode with AllocateArrayNode's length here
4492     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
4493     //
4494     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
4495     // a new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy).
4496     Node* init_control = init->proj_out(TypeFunc::Control);
4497     Node* alloc_length = alloc->Ideal_length();
4498 #ifdef ASSERT
4499     Node* prev_cast = NULL;
4500 #endif
4501     for (uint i = 0; i < init_control->outcnt(); i++) {
4502       Node* init_out = init_control->raw_out(i);
4503       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
4504 #ifdef ASSERT
4505         if (prev_cast == NULL) {
4506           prev_cast = init_out;

4508           if (prev_cast->cmp(*init_out) == false) {
4509             prev_cast->dump();
4510             init_out->dump();
4511             assert(false, "not equal CastIINode");
4512           }
4513         }
4514 #endif
4515         C->gvn_replace_by(init_out, alloc_length);
4516       }
4517     }
4518     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
4519 
4520     // move the allocation here (after the guards)
4521     _gvn.hash_delete(alloc);
4522     alloc->set_req(TypeFunc::Control, control());
4523     alloc->set_req(TypeFunc::I_O, i_o());
4524     Node *mem = reset_memory();
4525     set_all_memory(mem);
4526     alloc->set_req(TypeFunc::Memory, mem);
4527     set_control(init->proj_out_or_null(TypeFunc::Control));
4528     set_i_o(callprojs.fallthrough_ioproj);
4529 
4530     // Update memory as done in GraphKit::set_output_for_allocation()
4531     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
4532     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
4533     if (ary_type->isa_aryptr() && length_type != NULL) {
4534       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4535     }
4536     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
4537     int            elemidx  = C->get_alias_index(telemref);
4538     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
4539     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
4540 
4541     Node* allocx = _gvn.transform(alloc);
4542     assert(allocx == alloc, "where has the allocation gone?");
4543     assert(dest->is_CheckCastPP(), "not an allocation result?");
4544 
4545     _gvn.hash_delete(dest);
4546     dest->set_req(0, control());
4547     Node* destx = _gvn.transform(dest);
4548     assert(destx == dest, "where has the allocation result gone?");

4684       // Do we have the exact type of dest?
4685       bool could_have_dest = dest_spec;
4686       ciKlass* src_k = top_src->klass();
4687       ciKlass* dest_k = top_dest->klass();
4688       if (!src_spec) {
4689         src_k = src_type->speculative_type_not_null();
4690         if (src_k != NULL && src_k->is_array_klass()) {
4691           could_have_src = true;
4692         }
4693       }
4694       if (!dest_spec) {
4695         dest_k = dest_type->speculative_type_not_null();
4696         if (dest_k != NULL && dest_k->is_array_klass()) {
4697           could_have_dest = true;
4698         }
4699       }
4700       if (could_have_src && could_have_dest) {
4701         // If we can have both exact types, emit the missing guards
4702         if (could_have_src && !src_spec) {
4703           src = maybe_cast_profiled_obj(src, src_k, true);


4704         }
4705         if (could_have_dest && !dest_spec) {
4706           dest = maybe_cast_profiled_obj(dest, dest_k, true);


4707         }
4708       }
4709     }
4710   }
4711 
4712   ciMethod* trap_method = method();
4713   int trap_bci = bci();
4714   if (saved_jvms != NULL) {
4715     trap_method = alloc->jvms()->method();
4716     trap_bci = alloc->jvms()->bci();
4717   }
4718 
4719   bool negative_length_guard_generated = false;
4720 
4721   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
4722       can_emit_guards &&
4723       !src->is_top() && !dest->is_top()) {
4724     // validate arguments: enables transformation of the ArrayCopyNode
4725     validated = true;
4726 
4727     RegionNode* slow_region = new RegionNode(1);
4728     record_for_igvn(slow_region);
4729 
4730     // (1) src and dest are arrays.
4731     generate_non_array_guard(load_object_klass(src), slow_region);
4732     generate_non_array_guard(load_object_klass(dest), slow_region);
4733 
4734     // (2) src and dest arrays must have elements of the same BasicType
4735     // done at macro expansion or at Ideal transformation time
4736 
4737     // (4) src_offset must not be negative.
4738     generate_negative_guard(src_offset, slow_region);
4739 
4740     // (5) dest_offset must not be negative.
4741     generate_negative_guard(dest_offset, slow_region);
4742 
4743     // (7) src_offset + length must not exceed length of src.

4746                          slow_region);
4747 
4748     // (8) dest_offset + length must not exceed length of dest.
4749     generate_limit_guard(dest_offset, length,
4750                          load_array_length(dest),
4751                          slow_region);
4752 
4753     // (6) length must not be negative.
4754     // This is also checked in generate_arraycopy() during macro expansion, but
4755     // we also have to check it here for the case where the ArrayCopyNode will
4756     // be eliminated by Escape Analysis.
4757     if (EliminateAllocations) {
4758       generate_negative_guard(length, slow_region);
4759       negative_length_guard_generated = true;
4760     }
4761 
4762     // (9) each element of an oop array must be assignable
4763     Node* dest_klass = load_object_klass(dest);
4764     if (src != dest) {
4765       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);


4766 
4767       if (not_subtype_ctrl != top()) {
4768         PreserveJVMState pjvms(this);
4769         set_control(not_subtype_ctrl);
4770         uncommon_trap(Deoptimization::Reason_intrinsic,
4771                       Deoptimization::Action_make_not_entrant);
4772         assert(stopped(), "Should be stopped");






















4773       }
4774     }

4775     {
4776       PreserveJVMState pjvms(this);
4777       set_control(_gvn.transform(slow_region));
4778       uncommon_trap(Deoptimization::Reason_intrinsic,
4779                     Deoptimization::Action_make_not_entrant);
4780       assert(stopped(), "Should be stopped");
4781     }
4782 
4783     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
4784     const Type *toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
4785     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
4786   }
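  // Illustrative sketch (annotation): in scalar terms the guards emitted above
  // deoptimize unless all of the following hold (numbering as in the comments):
  //
  //   src.getClass().isArray() && dest.getClass().isArray()          // (1)
  //   && src_offset >= 0 && dest_offset >= 0                         // (4), (5)
  //   && src_offset  + length <= src.length                          // (7)
  //   && dest_offset + length <= dest.length                         // (8)
  //   && length >= 0                      // (6), only when EliminateAllocations
  //   && (src == dest || src.getClass() is a subtype of dest.getClass())  // (9)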
4787 
4788   arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
4789 
4790   if (stopped()) {
4791     return true;
4792   }
4793 
4794   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL, negative_length_guard_generated,
4795                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
4796                                           // so the compiler has a chance to eliminate them: during macro expansion,
4797                                           // we have to set their control (CastPP nodes are eliminated).
4798                                           load_object_klass(src), load_object_klass(dest),
4799                                           load_array_length(src), load_array_length(dest));
4800 
4801   ac->set_arraycopy(validated);
4802 
4803   Node* n = _gvn.transform(ac);
4804   if (n == ac) {
4805     ac->connect_outputs(this);

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "ci/ciFlatArrayKlass.hpp"
  28 #include "ci/ciUtilities.inline.hpp"
  29 #include "classfile/vmIntrinsics.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compileLog.hpp"
  32 #include "gc/shared/barrierSet.hpp"
  33 #include "jfr/support/jfrIntrinsics.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "oops/klass.inline.hpp"
  36 #include "oops/objArrayKlass.hpp"
  37 #include "opto/addnode.hpp"
  38 #include "opto/arraycopynode.hpp"
  39 #include "opto/c2compiler.hpp"
  40 #include "opto/castnode.hpp"
  41 #include "opto/cfgnode.hpp"
  42 #include "opto/convertnode.hpp"
  43 #include "opto/countbitsnode.hpp"
  44 #include "opto/idealKit.hpp"
  45 #include "opto/library_call.hpp"
  46 #include "opto/mathexactnode.hpp"
  47 #include "opto/mulnode.hpp"

 304   case vmIntrinsics::_indexOfUL:                return inline_string_indexOf(StrIntrinsicNode::UL);
 305   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 306   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 307   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 308   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 309   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 310 
 311   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 312   case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);
 313 
 314   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 315   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 316   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 317   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 318 
 319   case vmIntrinsics::_compressStringC:
 320   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 321   case vmIntrinsics::_inflateStringC:
 322   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 323 
 324   case vmIntrinsics::_makePrivateBuffer:        return inline_unsafe_make_private_buffer();
 325   case vmIntrinsics::_finishPrivateBuffer:      return inline_unsafe_finish_private_buffer();
 326   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 327   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 328   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 329   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 330   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 331   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 332   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 333   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 334   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);
 335   case vmIntrinsics::_getValue:                 return inline_unsafe_access(!is_store, T_INLINE_TYPE,Relaxed, false);
 336 
 337   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 338   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 339   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 340   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 341   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 342   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 343   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 344   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 345   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);
 346   case vmIntrinsics::_putValue:                 return inline_unsafe_access( is_store, T_INLINE_TYPE,Relaxed, false);
 347 
 348   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 349   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 350   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 351   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 352   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 353   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 354   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 355   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 356   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 357 
 358   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 359   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 360   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 361   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 362   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 363   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 364   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 365   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 366   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 495   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 496   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 497   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 498   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 499   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 500 
 501   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 502   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 503 
 504   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 505 
 506   case vmIntrinsics::_isInstance:
 507   case vmIntrinsics::_getModifiers:
 508   case vmIntrinsics::_isInterface:
 509   case vmIntrinsics::_isArray:
 510   case vmIntrinsics::_isPrimitive:
 511   case vmIntrinsics::_isHidden:
 512   case vmIntrinsics::_getSuperclass:
 513   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 514 
 515   case vmIntrinsics::_asPrimaryType:
 516   case vmIntrinsics::_asValueType:              return inline_primitive_Class_conversion(intrinsic_id());
 517 
 518   case vmIntrinsics::_floatToRawIntBits:
 519   case vmIntrinsics::_floatToIntBits:
 520   case vmIntrinsics::_intBitsToFloat:
 521   case vmIntrinsics::_doubleToRawLongBits:
 522   case vmIntrinsics::_doubleToLongBits:
 523   case vmIntrinsics::_longBitsToDouble:         return inline_fp_conversions(intrinsic_id());
 524 
 525   case vmIntrinsics::_numberOfLeadingZeros_i:
 526   case vmIntrinsics::_numberOfLeadingZeros_l:
 527   case vmIntrinsics::_numberOfTrailingZeros_i:
 528   case vmIntrinsics::_numberOfTrailingZeros_l:
 529   case vmIntrinsics::_bitCount_i:
 530   case vmIntrinsics::_bitCount_l:
 531   case vmIntrinsics::_reverseBytes_i:
 532   case vmIntrinsics::_reverseBytes_l:
 533   case vmIntrinsics::_reverseBytes_s:
 534   case vmIntrinsics::_reverseBytes_c:           return inline_number_methods(intrinsic_id());
 535 
 536   case vmIntrinsics::_getCallerClass:           return inline_native_Reflection_getCallerClass();
 537 

2154   case vmIntrinsics::_numberOfLeadingZeros_l:   n = new CountLeadingZerosLNode( arg);  break;
2155   case vmIntrinsics::_numberOfTrailingZeros_i:  n = new CountTrailingZerosINode(arg);  break;
2156   case vmIntrinsics::_numberOfTrailingZeros_l:  n = new CountTrailingZerosLNode(arg);  break;
2157   case vmIntrinsics::_bitCount_i:               n = new PopCountINode(          arg);  break;
2158   case vmIntrinsics::_bitCount_l:               n = new PopCountLNode(          arg);  break;
2159   case vmIntrinsics::_reverseBytes_c:           n = new ReverseBytesUSNode(0,   arg);  break;
2160   case vmIntrinsics::_reverseBytes_s:           n = new ReverseBytesSNode( 0,   arg);  break;
2161   case vmIntrinsics::_reverseBytes_i:           n = new ReverseBytesINode( 0,   arg);  break;
2162   case vmIntrinsics::_reverseBytes_l:           n = new ReverseBytesLNode( 0,   arg);  break;
2163   default:  fatal_unexpected_iid(id);  break;
2164   }
2165   set_result(_gvn.transform(n));
2166   return true;
2167 }
2168 
2169 //----------------------------inline_unsafe_access----------------------------
2170 
2171 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2172   // Attempt to infer a sharper value type from the offset and base type.
2173   ciKlass* sharpened_klass = NULL;
2174   bool null_free = false;
2175 
2176   // See if it is an instance field, with an object type.
2177   if (alias_type->field() != NULL) {
2178     if (alias_type->field()->type()->is_klass()) {
2179       sharpened_klass = alias_type->field()->type()->as_klass();
2180       null_free = alias_type->field()->is_null_free();
2181     }
2182   }
2183 
2184   // See if it is a narrow oop array.
2185   if (adr_type->isa_aryptr()) {
2186     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2187       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2188       null_free = adr_type->is_aryptr()->is_null_free();
2189       if (elem_type != NULL) {
2190         sharpened_klass = elem_type->klass();
2191       }
2192     }
2193   }
2194 
2195   // The sharpened class might be unloaded if there is no class loader
2196   // constraint in place.
2197   if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2198     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2199     if (null_free) {
2200       tjp = tjp->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2201     }
2202 
2203 #ifndef PRODUCT
2204     if (C->print_intrinsics() || C->print_inlining()) {
2205       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2206       tty->print("  sharpened value: ");  tjp->dump();      tty->cr();
2207     }
2208 #endif
2209     // Sharpen the value type.
2210     return tjp;
2211   }
2212   return NULL;
2213 }
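// Illustrative sketch (annotation, hypothetical example): for
//
//   class C { String f; }
//
// an unsafe getReference at f's offset sees an instance field of declared type
// String, so the loaded value is sharpened from plain Object to String (and
// joined with NOTNULL when the field is null-free).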
2214 
2215 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2216   switch (kind) {
2217       case Relaxed:
2218         return MO_UNORDERED;
2219       case Opaque:
2220         return MO_RELAXED;
2221       case Acquire:

2237   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2238   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2239 
2240   if (is_reference_type(type)) {
2241     decorators |= ON_UNKNOWN_OOP_REF;
2242   }
2243 
2244   if (unaligned) {
2245     decorators |= C2_UNALIGNED;
2246   }
2247 
2248 #ifndef PRODUCT
2249   {
2250     ResourceMark rm;
2251     // Check the signatures.
2252     ciSignature* sig = callee()->signature();
2253 #ifdef ASSERT
2254     if (!is_store) {
2255       // Object getReference(Object base, int/long offset), etc.
2256       BasicType rtype = sig->return_type()->basic_type();
2257       assert(rtype == type || (rtype == T_OBJECT && type == T_INLINE_TYPE), "getter must return the expected value");
2258       assert(sig->count() == 2 || (type == T_INLINE_TYPE && sig->count() == 3), "oop getter has 2 or 3 arguments");
2259       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2260       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2261     } else {
2262       // void putReference(Object base, int/long offset, Object x), etc.
2263       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2264       assert(sig->count() == 3 || (type == T_INLINE_TYPE && sig->count() == 4), "oop putter has 3 arguments");
2265       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2266       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2267       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2268       assert(vtype == type || (type == T_INLINE_TYPE && vtype == T_OBJECT), "putter must accept the expected value");
2269     }
2270 #endif // ASSERT
2271  }
2272 #endif //PRODUCT
2273 
2274   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2275 
2276   Node* receiver = argument(0);  // type: oop
2277 
2278   // Build address expression.
2279   Node* heap_base_oop = top();
2280 
2281   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2282   Node* base = argument(1);  // type: oop
2283   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2284   Node* offset = argument(2);  // type: long
2285   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2286   // to be plain byte offsets, which are also the same as those accepted
2287   // by oopDesc::field_addr.
2288   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2289          "fieldOffset must be byte-scaled");
2290 
2291   ciInlineKlass* inline_klass = NULL;
2292   if (type == T_INLINE_TYPE) {
2293     const TypeInstPtr* cls = _gvn.type(argument(4))->isa_instptr();
2294     if (cls == NULL || cls->const_oop() == NULL) {
2295       return false;
2296     }
2297     ciType* mirror_type = cls->const_oop()->as_instance()->java_mirror_type();
2298     if (!mirror_type->is_inlinetype()) {
2299       return false;
2300     }
2301     inline_klass = mirror_type->as_inline_klass();
2302   }
2303 
2304   if (base->is_InlineTypeBase()) {
2305     InlineTypeBaseNode* vt = base->as_InlineTypeBase();
2306     if (is_store) {
2307       if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->isa_inlinetype() || !_gvn.type(vt)->is_inlinetype()->larval()) {
2308         return false;
2309       }
2310       base = vt->get_oop();
2311     } else {
2312       if (offset->is_Con()) {
2313         long off = find_long_con(offset, 0);
2314         ciInlineKlass* vk = vt->type()->inline_klass();
2315         if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2316           return false;
2317         }
2318 
2319         ciField* field = vk->get_non_flattened_field_by_offset(off);
2320         if (field != NULL) {
2321           BasicType bt = field->layout_type();
2322           if (bt == T_ARRAY || bt == T_NARROWOOP || (bt == T_INLINE_TYPE && !field->is_flattened())) {
2323             bt = T_OBJECT;
2324           }
2325           if (bt == type && (bt != T_INLINE_TYPE || field->type() == inline_klass)) {
2326             set_result(vt->field_value_by_offset(off, false));
2327             return true;
2328           }
2329         }
2330       }
2331       if (vt->is_InlineType()) {
2332         // Re-execute the unsafe access if allocation triggers deoptimization.
2333         PreserveReexecuteState preexecs(this);
2334         jvms()->set_should_reexecute(true);
2335         vt = vt->buffer(this);
2336       }
2337       base = vt->get_oop();
2338     }
2339   }
2340 
2341   // 32-bit machines ignore the high half!
2342   offset = ConvL2X(offset);
2343 
2344   // Save state and restore on bailout
2345   uint old_sp = sp();
2346   SafePointNode* old_map = clone_map();
2347 
2348   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2349 
2350   if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2351     if (type != T_OBJECT && (inline_klass == NULL || !inline_klass->has_object_fields())) {
2352       decorators |= IN_NATIVE; // off-heap primitive access
2353     } else {
2354       set_map(old_map);
2355       set_sp(old_sp);
2356       return false; // off-heap oop accesses are not supported
2357     }
2358   } else {
2359     heap_base_oop = base; // on-heap or mixed access
2360   }
2361 
2362   // Can base be NULL? Otherwise, always on-heap access.
2363   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2364 
2365   if (!can_access_non_heap) {
2366     decorators |= IN_HEAP;
2367   }
2368 
2369   Node* val = is_store ? argument(4 + (type == T_INLINE_TYPE ? 1 : 0)) : NULL;
2370 
2371   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2372   if (adr_type == TypePtr::NULL_PTR) {
2373     set_map(old_map);
2374     set_sp(old_sp);
2375     return false; // off-heap access with zero address
2376   }
2377 
2378   // Try to categorize the address.
2379   Compile::AliasType* alias_type = C->alias_type(adr_type);
2380   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2381 
2382   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2383       alias_type->adr_type() == TypeAryPtr::RANGE) {
2384     set_map(old_map);
2385     set_sp(old_sp);
2386     return false; // not supported
2387   }
2388 
2389   bool mismatched = false;
2390   BasicType bt = T_ILLEGAL;
2391   ciField* field = NULL;
2392   if (adr_type->isa_instptr()) {
2393     const TypeInstPtr* instptr = adr_type->is_instptr();
2394     ciInstanceKlass* k = instptr->klass()->as_instance_klass();
2395     int off = instptr->offset();
2396     if (instptr->const_oop() != NULL &&
2397         instptr->klass() == ciEnv::current()->Class_klass() &&
2398         instptr->offset() >= (instptr->klass()->as_instance_klass()->size_helper() * wordSize)) {
2399       k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2400       field = k->get_field_by_offset(off, true);
2401     } else {
2402       field = k->get_non_flattened_field_by_offset(off);
2403     }
2404     if (field != NULL) {
2405       bt = field->layout_type();
2406     }
2407     assert(bt == alias_type->basic_type() || bt == T_INLINE_TYPE, "should match");
2408     if (field != NULL && bt == T_INLINE_TYPE && !field->is_flattened()) {
2409       bt = T_OBJECT;
2410     }
2411   } else {
2412     bt = alias_type->basic_type();
2413   }
2414 
2415   if (bt != T_ILLEGAL) {
2416     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2417     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2418       // Alias type doesn't differentiate between byte[] and boolean[].
2419       // Use address type to get the element type.
2420       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2421     }
2422     if (bt == T_ARRAY || bt == T_NARROWOOP) {
2423       // accessing an array field with getReference is not a mismatch
2424       bt = T_OBJECT;
2425     }
2426     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2427       // Don't intrinsify mismatched object accesses
2428       set_map(old_map);
2429       set_sp(old_sp);
2430       return false;
2431     }
2432     mismatched = (bt != type);
2433   } else if (alias_type->adr_type()->isa_oopptr()) {
2434     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2435   }
2436 
2437   if (type == T_INLINE_TYPE) {
2438     if (adr_type->isa_instptr()) {
2439       if (field == NULL || field->type() != inline_klass) {
2440         mismatched = true;
2441       }
2442     } else if (adr_type->isa_aryptr()) {
2443       const Type* elem = adr_type->is_aryptr()->elem();
2444       if (!elem->isa_inlinetype()) {
2445         mismatched = true;
2446       } else if (elem->inline_klass() != inline_klass) {
2447         mismatched = true;
2448       }
2449     } else {
2450       mismatched = true;
2451     }
2452     if (is_store) {
2453       const Type* val_t = _gvn.type(val);
2454       if (!val_t->isa_inlinetype() || val_t->inline_klass() != inline_klass) {
2455         set_map(old_map);
2456         set_sp(old_sp);
2457         return false;
2458       }
2459     }
2460   }
2461 
2462   old_map->destruct(&_gvn);
2463   assert(!mismatched || type == T_INLINE_TYPE || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2464 
2465   if (mismatched) {
2466     decorators |= C2_MISMATCHED;
2467   }
2468 
2469   // First guess at the value type.
2470   const Type *value_type = Type::get_const_basic_type(type);
2471 
2472   // Figure out the memory ordering.
2473   decorators |= mo_decorator_for_access_kind(kind);
2474 
2475   if (!is_store) {
2476     if (type == T_OBJECT) {
2477       const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2478       if (tjp != NULL) {
2479         value_type = tjp;
2480       }
2481     } else if (type == T_INLINE_TYPE) {
2482       value_type = NULL;
2483     }
2484   }
2485 
2486   receiver = null_check(receiver);
2487   if (stopped()) {
2488     return true;
2489   }
2490   // Heap pointers get a null-check from the interpreter,
2491   // as a courtesy.  However, this is not guaranteed by Unsafe,
2492   // and it is not possible to fully distinguish unintended nulls
2493   // from intended ones in this API.
2494 
2495   if (!is_store) {
2496     Node* p = NULL;
2497     // Try to constant fold a load from a constant field
2498 
2499     if (heap_base_oop != top() && field != NULL && field->is_constant() && !field->is_flattened() && !mismatched) {
2500       // final or stable field
2501       p = make_constant_from_field(field, heap_base_oop);
2502     }
2503 
2504     if (p == NULL) { // Could not constant fold the load
2505       if (type == T_INLINE_TYPE) {
2506         if (adr_type->isa_instptr() && !mismatched) {
2507           ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
2508           int offset = adr_type->is_instptr()->offset();
2509           p = InlineTypeNode::make_from_flattened(this, inline_klass, base, base, holder, offset, decorators);
2510         } else {
2511           p = InlineTypeNode::make_from_flattened(this, inline_klass, base, adr, NULL, 0, decorators);
2512         }
2513       } else {
2514         p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2515         const TypeOopPtr* ptr = value_type->make_oopptr();
2516         if (ptr != NULL && ptr->is_inlinetypeptr()) {
2517           // Load a non-flattened inline type from memory
2518           p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass(), !ptr->maybe_null());
2519         }
2520       }
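      // Illustrative sketch (annotation): the IdealKit fragment below is the
      // scalar equivalent of
      //
      //   p = (p != 0) ? 1 : 0;
      //
      // i.e. any non-zero byte observed through such an access is canonicalized
      // to the boolean value 1.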
2521       // Normalize the value returned by getBoolean in the following cases
2522       if (type == T_BOOLEAN &&
2523           (mismatched ||
2524            heap_base_oop == top() ||                  // - heap_base_oop is NULL or
2525            (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL
2526                                                       //   and the unsafe access is made at a large offset
2527                                                       //   (i.e., larger than the maximum offset necessary for any
2528                                                       //   field access)
2529             ) {
2530           IdealKit ideal = IdealKit(this);
2531 #define __ ideal.
2532           IdealVariable normalized_result(ideal);
2533           __ declarations_done();
2534           __ set(normalized_result, p);
2535           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2536           __ set(normalized_result, ideal.ConI(1));
2537           ideal.end_if();
2538           final_sync(ideal);
2539           p = __ value(normalized_result);
2540 #undef __
2541       }
2542     }
2543     if (type == T_ADDRESS) {
2544       p = gvn().transform(new CastP2XNode(NULL, p));
2545       p = ConvX2UL(p);
2546     }
2547     // The load node has the control of the preceding MemBarCPUOrder.  All
2548     // following nodes will have the control of the MemBarCPUOrder inserted at
2549     // the end of this method.  So, pushing the load onto the stack at a later
2550     // point is fine.
2551     set_result(p);
2552   } else {
2553     if (bt == T_ADDRESS) {
2554       // Repackage the long as a pointer.
2555       val = ConvL2X(val);
2556       val = gvn().transform(new CastX2PNode(val));
2557     }
2558     if (type == T_INLINE_TYPE) {
2559       if (adr_type->isa_instptr() && !mismatched) {
2560         ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
2561         int offset = adr_type->is_instptr()->offset();
2562         val->as_InlineTypeBase()->store_flattened(this, base, base, holder, offset, decorators);
2563       } else {
2564         val->as_InlineTypeBase()->store_flattened(this, base, adr, NULL, 0, decorators);
2565       }
2566     } else {
2567       access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2568     }
2569   }
2570 
2571   if (argument(1)->is_InlineType() && is_store) {
2572     Node* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(base)->inline_klass());
2573     value = value->as_InlineType()->make_larval(this, false);
2574     replace_in_map(argument(1), value);
2575   }
2576 
2577   return true;
2578 }
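// Illustrative sketch (annotation, hedged): at a high level, a plain access such
// as
//
//   int v = U.getInt(base, offset);
//
// is compiled above into an address computation (base + byte-scaled offset)
// followed by a decorated memory access (IN_HEAP or IN_NATIVE, memory ordering
// taken from the access kind, C2_MISMATCHED when the static and requested types
// differ).  Every "return false" bails out to the non-intrinsified Unsafe call.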
2579 
2580 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2581   Node* receiver = argument(0);
2582   Node* value = argument(1);
2583   if (!value->is_InlineType()) {
2584     return false;
2585   }
2586 
2587   receiver = null_check(receiver);
2588   if (stopped()) {
2589     return true;
2590   }
2591 
2592   set_result(value->as_InlineType()->make_larval(this, true));
2593   return true;
2594 }
2595 
2596 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2597   Node* receiver = argument(0);
2598   Node* buffer = argument(1);
2599   if (!buffer->is_InlineType()) {
2600     return false;
2601   }
2602   InlineTypeNode* vt = buffer->as_InlineType();
2603   if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_inlinetype()->larval()) {
2604     return false;
2605   }
2606 
2607   receiver = null_check(receiver);
2608   if (stopped()) {
2609     return true;
2610   }
2611 
2612   set_result(vt->finish_larval(this));
2613   return true;
2614 }
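// Illustrative sketch (annotation, assuming the jdk.internal.misc.Unsafe methods
// of the same names as the intrinsics above): the larval-buffer protocol for
// updating fields of an inline (value) object looks roughly like
//
//   v = U.makePrivateBuffer(v);     // enter the larval state (private buffer)
//   U.putInt(v, offset, x);         // writes target the private buffer
//   v = U.finishPrivateBuffer(v);   // leave the larval state, publish the value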
2615 
2616 //----------------------------inline_unsafe_load_store----------------------------
2617 // This method serves a couple of different customers (depending on LoadStoreKind):
2618 //
2619 // LS_cmp_swap:
2620 //
2621 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2622 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2623 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2624 //
2625 // LS_cmp_swap_weak:
2626 //
2627 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2628 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2629 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2630 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2631 //
2632 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);

2801     }
2802     case LS_cmp_swap:
2803     case LS_cmp_swap_weak:
2804     case LS_get_add:
2805       break;
2806     default:
2807       ShouldNotReachHere();
2808   }
2809 
2810   // Null check receiver.
2811   receiver = null_check(receiver);
2812   if (stopped()) {
2813     return true;
2814   }
2815 
2816   int alias_idx = C->get_alias_index(adr_type);
2817 
2818   if (is_reference_type(type)) {
2819     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2820 
2821     if (oldval != NULL && oldval->is_InlineType()) {
2822       // Re-execute the unsafe access if allocation triggers deoptimization.
2823       PreserveReexecuteState preexecs(this);
2824       jvms()->set_should_reexecute(true);
2825       oldval = oldval->as_InlineType()->buffer(this)->get_oop();
2826     }
2827     if (newval != NULL && newval->is_InlineType()) {
2828       // Re-execute the unsafe access if allocation triggers deoptimization.
2829       PreserveReexecuteState preexecs(this);
2830       jvms()->set_should_reexecute(true);
2831       newval = newval->as_InlineType()->buffer(this)->get_oop();
2832     }
2833 
2834     // Transformation of a value which could be a NULL pointer (CastPP #NULL)
2835     // could be delayed during Parse (for example, in adjust_map_after_if()).
2836     // Execute the transformation here to avoid barrier generation in such a case.
2837     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2838       newval = _gvn.makecon(TypePtr::NULL_PTR);
2839 
2840     if (oldval != NULL && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2841       // Refine the value to a null constant, when it is known to be null
2842       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2843     }
2844   }
2845 
2846   Node* result = NULL;
2847   switch (kind) {
2848     case LS_cmp_exchange: {
2849       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2850                                             oldval, newval, value_type, type, decorators);
2851       break;
2852     }
2853     case LS_cmp_swap_weak:

2972   Node* cls = null_check(argument(1));
2973   if (stopped())  return true;
2974 
2975   Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
2976   kls = null_check(kls);
2977   if (stopped())  return true;  // argument was like int.class
2978 
2979   Node* test = NULL;
2980   if (LibraryCallKit::klass_needs_init_guard(kls)) {
2981     // Note:  The argument might still be an illegal value like
2982     // Serializable.class or Object[].class.   The runtime will handle it.
2983     // But we must make an explicit check for initialization.
2984     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2985     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2986     // can generate code to load it as unsigned byte.
2987     Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
2988     Node* bits = intcon(InstanceKlass::fully_initialized);
2989     test = _gvn.transform(new SubINode(inst, bits));
2990     // The 'test' is non-zero if we need to take a slow path.
2991   }
2992   Node* obj = NULL;
2993   ciKlass* klass = _gvn.type(kls)->is_klassptr()->klass();
2994   if (klass->is_inlinetype()) {
2995     obj = InlineTypeNode::make_default(_gvn, klass->as_inline_klass());
2996   } else {
2997     obj = new_instance(kls, test);
2998   }
2999   set_result(obj);
3000   return true;
3001 }
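// Illustrative sketch (annotation): the initialization guard above is, in
// scalar terms,
//
//   if (klass->_init_state != InstanceKlass::fully_initialized)  take the slow path;
//
// computed as a subtraction so that a single non-zero test selects the slow
// path, where the runtime performs class initialization before allocating.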
3002 
3003 //------------------------inline_native_time_funcs--------------
3004 // inline code for System.currentTimeMillis() and System.nanoTime()
3005 // these have the same type and signature
3006 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3007   const TypeFunc* tf = OptoRuntime::void_long_Type();
3008   const TypePtr* no_memory_effects = NULL;
3009   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3010   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3011 #ifdef ASSERT
3012   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3013   assert(value_top == top(), "second value must be top");
3014 #endif
3015   set_result(value);
3016   return true;
3017 }
3018 

3126   set_control(jobj_is_not_null);
3127   Node* res = access_load(jobj, TypeInstPtr::NOTNULL, T_OBJECT,
3128                           IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
3129   result_rgn->init_req(_normal_path, control());
3130   result_val->init_req(_normal_path, res);
3131 
3132   set_result(result_rgn, result_val);
3133 
3134   return true;
3135 }
3136 
3137 #endif // JFR_HAVE_INTRINSICS
3138 
3139 //------------------------inline_native_currentThread------------------
3140 bool LibraryCallKit::inline_native_currentThread() {
3141   Node* junk = NULL;
3142   set_result(generate_current_thread(junk));
3143   return true;
3144 }
3145 









3146 //-----------------------load_klass_from_mirror_common-------------------------
3147 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3148 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3149 // and branch to the given path on the region.
3150 // If never_see_null, take an uncommon trap on null, so we can optimistically
3151 // compile for the non-null case.
3152 // If the region is NULL, force never_see_null = true.
3153 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3154                                                     bool never_see_null,
3155                                                     RegionNode* region,
3156                                                     int null_path,
3157                                                     int offset) {
3158   if (region == NULL)  never_see_null = true;
3159   Node* p = basic_plus_adr(mirror, offset);
3160   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3161   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3162   Node* null_ctl = top();
3163   kls = null_check_oop(kls, &null_ctl, never_see_null);
3164   if (region != NULL) {
3165     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

3168     assert(null_ctl == top(), "no loose ends");
3169   }
3170   return kls;
3171 }
3172 
3173 //--------------------(inline_native_Class_query helpers)---------------------
3174 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3175 // Fall through if (mods & mask) == bits, take the guard otherwise.
3176 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3177   // Branch around if the given klass has the given modifier bit set.
3178   // Like generate_guard, adds a new path onto the region.
3179   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3180   Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
3181   Node* mask = intcon(modifier_mask);
3182   Node* bits = intcon(modifier_bits);
3183   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3184   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3185   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3186   return generate_fair_guard(bol, region);
3187 }
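// Illustrative sketch (annotation): the guard above is, in scalar terms,
//
//   bool fall_through = ((access_flags & modifier_mask) == modifier_bits);
//
// e.g. for the Object.clone() fast path (mask = JVM_ACC_IS_CLONEABLE_FAST |
// JVM_ACC_HAS_FINALIZER, bits = JVM_ACC_IS_CLONEABLE_FAST) the fall-through
// case is "fast-cloneable and no finalizer"; everything else takes the guard
// (slow) path.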
3188 
3189 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3190   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3191 }
3192 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3193   return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3194 }
3195 
3196 //-------------------------inline_native_Class_query-------------------
3197 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3198   const Type* return_type = TypeInt::BOOL;
3199   Node* prim_return_value = top();  // what happens if it's a primitive class?
3200   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3201   bool expect_prim = false;     // most of these guys expect to work on refs
3202 
3203   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3204 
3205   Node* mirror = argument(0);
3206   Node* obj    = top();
3207 
3208   switch (id) {

3362 
3363   case vmIntrinsics::_getClassAccessFlags:
3364     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3365     query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3366     break;
3367 
3368   default:
3369     fatal_unexpected_iid(id);
3370     break;
3371   }
3372 
3373   // Fall-through is the normal case of a query to a real class.
3374   phi->init_req(1, query_value);
3375   region->init_req(1, control());
3376 
3377   C->set_has_split_ifs(true); // Has chance for split-if optimization
3378   set_result(region, phi);
3379   return true;
3380 }
3381 
3382 //-------------------------inline_primitive_Class_conversion-------------------
3383 // public Class<T> java.lang.Class.asPrimaryType();
3384 // public Class<T> java.lang.Class.asValueType();
3385 bool LibraryCallKit::inline_primitive_Class_conversion(vmIntrinsics::ID id) {
3386   Node* mirror = argument(0); // Receiver Class
3387   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3388   if (mirror_con == NULL) {
3389     return false;
3390   }
3391 
3392   bool is_val_mirror = true;
3393   ciType* tm = mirror_con->java_mirror_type(&is_val_mirror);
3394   if (tm != NULL) {
3395     Node* result = mirror;
3396     if (id == vmIntrinsics::_asPrimaryType && is_val_mirror) {
3397       result = _gvn.makecon(TypeInstPtr::make(tm->as_inline_klass()->ref_mirror()));
3398     } else if (id == vmIntrinsics::_asValueType) {
3399       if (!tm->is_inlinetype()) {
3400         return false; // Throw UnsupportedOperationException
3401       } else if (!is_val_mirror) {
3402         result = _gvn.makecon(TypeInstPtr::make(tm->as_inline_klass()->val_mirror()));
3403       }
3404     }
3405     set_result(result);
3406     return true;
3407   }
3408   return false;
3409 }
3410 
3411 //-------------------------inline_Class_cast-------------------
3412 bool LibraryCallKit::inline_Class_cast() {
3413   Node* mirror = argument(0); // Class
3414   Node* obj    = argument(1);
3415   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3416   if (mirror_con == NULL) {
3417     return false;  // dead path (mirror->is_top()).
3418   }
3419   if (obj == NULL || obj->is_top()) {
3420     return false;  // dead path
3421   }
3422   ciKlass* obj_klass = NULL;
3423   const Type* obj_t = _gvn.type(obj);
3424   if (obj->is_InlineType()) {
3425     obj_klass = obj_t->inline_klass();
3426   } else if (obj_t->isa_oopptr()) {
3427     obj_klass = obj_t->is_oopptr()->klass();
3428   }
3429 
3430   // First, see if Class.cast() can be folded statically.
3431   // java_mirror_type() returns non-null for compile-time Class constants.
3432   bool requires_null_check = false;
3433   ciType* tm = mirror_con->java_mirror_type(&requires_null_check);
3434   // Check for null if casting to QMyValue
3435   requires_null_check &= !obj->is_InlineType();
3436   if (tm != NULL && tm->is_klass() && obj_klass != NULL) {
3437     if (!obj_klass->is_loaded()) {
3438       // Don't use intrinsic when class is not loaded.
3439       return false;
3440     } else {
3441       int static_res = C->static_subtype_check(tm->as_klass(), obj_klass);
3442       if (static_res == Compile::SSC_always_true) {
3443         // isInstance() is true - fold the code.
3444         if (requires_null_check) {
3445           obj = null_check(obj);
3446         }
3447         set_result(obj);
3448         return true;
3449       } else if (static_res == Compile::SSC_always_false) {
3450         // Don't use intrinsic, have to throw ClassCastException.
3451         // If the reference is null, the non-intrinsic bytecode will
3452         // be optimized appropriately.
3453         return false;
3454       }
3455     }
3456   }
3457 
3458   // Bailout intrinsic and do normal inlining if exception path is frequent.
3459   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3460     return false;
3461   }
3462 
3463   // Generate dynamic checks.
3464   // Class.cast() is the Java implementation of the _checkcast bytecode.
3465   // Do checkcast (Parse::do_checkcast()) optimizations here.
3466 
3467   if (requires_null_check) {
3468     obj = null_check(obj);
3469   }
3470   mirror = null_check(mirror);
3471   // If mirror is dead, only null-path is taken.
3472   if (stopped()) {
3473     return true;
3474   }
3475 
3476   // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
3477   enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
3478   RegionNode* region = new RegionNode(PATH_LIMIT);
3479   record_for_igvn(region);
3480 
3481   // Now load the mirror's klass metaobject, and null-check it.
3482   // If kls is null, we have a primitive mirror and
3483   // nothing is an instance of a primitive type.
3484   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
3485 
3486   Node* res = top();
3487   if (!stopped()) {
3488     if (EnableValhalla && !obj->is_InlineType() && !requires_null_check) {
3489       // Check if we are casting to QMyValue
3490       Node* ctrl_val_mirror = generate_fair_guard(is_val_mirror(mirror), NULL);
3491       if (ctrl_val_mirror != NULL) {
3492         RegionNode* r = new RegionNode(3);
3493         record_for_igvn(r);
3494         r->init_req(1, control());
3495 
3496         // Casting to QMyValue, check for null
3497         set_control(ctrl_val_mirror);
3498         { // PreserveJVMState because null check replaces obj in map
3499           PreserveJVMState pjvms(this);
3500           Node* null_ctr = top();
3501           null_check_oop(obj, &null_ctr);
3502           region->init_req(_npe_path, null_ctr);
3503           r->init_req(2, control());
3504         }
3505         set_control(_gvn.transform(r));
3506       }
3507     }
3508 
3509     Node* bad_type_ctrl = top();
3510     // Do checkcast optimizations.
3511     res = gen_checkcast(obj, kls, &bad_type_ctrl);
3512     region->init_req(_bad_type_path, bad_type_ctrl);
3513   }
3514   if (region->in(_prim_path) != top() ||
3515       region->in(_bad_type_path) != top() ||
3516       region->in(_npe_path) != top()) {
3517     // Let Interpreter throw ClassCastException.
3518     PreserveJVMState pjvms(this);
3519     set_control(_gvn.transform(region));
3520     uncommon_trap(Deoptimization::Reason_intrinsic,
3521                   Deoptimization::Action_maybe_recompile);
3522   }
3523   if (!stopped()) {
3524     set_result(res);
3525   }
3526   return true;
3527 }
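// Illustrative sketch (not from the original file): how the static subtype answer
// above determines the compilation strategy for Class.cast(). The enums below are
// stand-ins invented for illustration; they do not exist in HotSpot.
enum class StaticSubtype { AlwaysTrue, AlwaysFalse, Unknown };
enum class CastPlan      { FoldToObject, BailOutToBytecode, EmitDynamicCheckcast };

static CastPlan plan_class_cast(StaticSubtype res) {
  switch (res) {
    case StaticSubtype::AlwaysTrue:  return CastPlan::FoldToObject;         // result is the (possibly null-checked) object
    case StaticSubtype::AlwaysFalse: return CastPlan::BailOutToBytecode;    // let the bytecode throw ClassCastException
    default:                         return CastPlan::EmitDynamicCheckcast; // checkcast plus uncommon trap on failure
  }
}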
3528 
3529 
3530 //--------------------------inline_native_subtype_check------------------------
3531 // This intrinsic takes the JNI calls out of the heart of
3532 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3533 bool LibraryCallKit::inline_native_subtype_check() {
3534   // Pull both arguments off the stack.
3535   Node* args[2];                // two java.lang.Class mirrors: superc, subc
3536   args[0] = argument(0);
3537   args[1] = argument(1);
3538   Node* klasses[2];             // corresponding Klasses: superk, subk
3539   klasses[0] = klasses[1] = top();
3540 
3541   enum {
3542     // A full decision tree on {superc is prim, subc is prim}:
3543     _prim_0_path = 1,           // {P,N} => false
3544                                 // {P,P} & superc!=subc => false
3545     _prim_same_path,            // {P,P} & superc==subc => true
3546     _prim_1_path,               // {N,P} => false
3547     _ref_subtype_path,          // {N,N} & subtype check wins => true
3548     _both_ref_path,             // {N,N} & subtype check loses => false
3549     PATH_LIMIT
3550   };
3551 
3552   RegionNode* region = new RegionNode(PATH_LIMIT);
3553   RegionNode* prim_region = new RegionNode(2);
3554   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
3555   record_for_igvn(region);
3556   record_for_igvn(prim_region);
3557 
3558   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
3559   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3560   int class_klass_offset = java_lang_Class::klass_offset();
3561 
3562   // First null-check both mirrors and load each mirror's klass metaobject.
3563   int which_arg;
3564   for (which_arg = 0; which_arg <= 1; which_arg++) {
3565     Node* arg = args[which_arg];
3566     arg = null_check(arg);
3567     if (stopped())  break;
3568     args[which_arg] = arg;
3569 
3570     Node* p = basic_plus_adr(arg, class_klass_offset);
3571     Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
3572     klasses[which_arg] = _gvn.transform(kls);
3573   }
3574 
3575   // Having loaded both klasses, test each for null.
3576   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3577   for (which_arg = 0; which_arg <= 1; which_arg++) {
3578     Node* kls = klasses[which_arg];
3579     Node* null_ctl = top();
3580     kls = null_check_oop(kls, &null_ctl, never_see_null);
3581     if (which_arg == 0) {
3582       prim_region->init_req(1, null_ctl);
3583     } else {
3584       region->init_req(_prim_1_path, null_ctl);
3585     }
3586     if (stopped())  break;
3587     klasses[which_arg] = kls;
3588   }
3589 
3590   if (!stopped()) {
3591     // now we have two reference types, in klasses[0..1]
3592     Node* subk   = klasses[1];  // the argument to isAssignableFrom
3593     Node* superk = klasses[0];  // the receiver
3594     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3595     // If superc is an inline mirror, we also need to check if superc == subc because LMyValue
3596     // is not a subtype of QMyValue, yet the subtype check passes since subk == superk.
3597     generate_fair_guard(is_val_mirror(args[0]), prim_region);
3598     // now we have a successful reference subtype check
3599     region->set_req(_ref_subtype_path, control());
3600   }
3601 
3602   // If both operands are primitive (both klasses null), then
3603   // we must return true when they are identical primitives.
3604   // It is convenient to test this after the first null klass check.
3605   // This path is also used if superc is a value mirror.
3606   set_control(_gvn.transform(prim_region));
3607   if (!stopped()) {
3608     // Since superc is primitive, make a guard for the superc==subc case.
3609     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
3610     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
3611     generate_fair_guard(bol_eq, region);
3612     if (region->req() == PATH_LIMIT+1) {
3613       // A guard was added.  If the added guard is taken, superc==subc.
3614       region->swap_edges(PATH_LIMIT, _prim_same_path);
3615       region->del_req(PATH_LIMIT);
3616     }
3617     region->set_req(_prim_0_path, control()); // Not equal after all.
3618   }
3619 
3620   // these are the only paths that produce 'true':
3621   phi->set_req(_prim_same_path,   intcon(1));
3622   phi->set_req(_ref_subtype_path, intcon(1));
3623 
3624   // pull together the cases:
3625   assert(region->req() == PATH_LIMIT, "sane region");
3626   for (uint i = 1; i < region->req(); i++) {
3627     Node* ctl = region->in(i);
3628     if (ctl == NULL || ctl == top()) {
3629       region->set_req(i, top());
3630       phi   ->set_req(i, top());
3631     } else if (phi->in(i) == NULL) {
3632       phi->set_req(i, intcon(0)); // all other paths produce 'false'
3633     }
3634   }
3635 
3636   set_control(_gvn.transform(region));
3637   set_result(_gvn.transform(phi));
3638   return true;
3639 }
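// Illustrative sketch (not from the original file): the decision tree that the
// region paths above encode, over plain flags. "Primitive" means the mirror's
// klass pointer is null; mirror identity stands in for the superc == subc test.
// The helper name is invented for illustration.
static bool is_assignable_from_sketch(bool superc_is_prim, bool subc_is_prim,
                                      bool same_mirror, bool ref_subtype_check_passes) {
  if (superc_is_prim) return subc_is_prim && same_mirror;  // {P,P} & superc==subc, otherwise false
  if (subc_is_prim)   return false;                        // {N,P}
  return ref_subtype_check_passes;                         // {N,N}: result of the subtype check
}
// The Valhalla-specific twist above additionally routes a value-class super mirror
// through the superc == subc comparison, since subk == superk would otherwise let
// the reference subtype check pass for the non-matching mirror.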
3640 
3641 //---------------------generate_array_guard_common------------------------
3642 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {

3643 
3644   if (stopped()) {
3645     return NULL;
3646   }
3647 
3648   // Like generate_guard, adds a new path onto the region.
3649   jint  layout_con = 0;
3650   Node* layout_val = get_layout_helper(kls, layout_con);
3651   if (layout_val == NULL) {
3652     bool query = false;
3653     switch(kind) {
3654       case ObjectArray:    query = Klass::layout_helper_is_objArray(layout_con); break;
3655       case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
3656       case TypeArray:      query = Klass::layout_helper_is_typeArray(layout_con); break;
3657       case FlatArray:      query = Klass::layout_helper_is_flatArray(layout_con); break;
3658       case NonFlatArray:   query = !Klass::layout_helper_is_flatArray(layout_con); break;
3659       case AnyArray:       query = Klass::layout_helper_is_array(layout_con); break;
3660       case NonArray:       query = !Klass::layout_helper_is_array(layout_con); break;
3661       default:
3662         ShouldNotReachHere();
3663     }
3664     if (!query) {
3665       return NULL;                       // never a branch
3666     } else {                             // always a branch
3667       Node* always_branch = control();
3668       if (region != NULL)
3669         region->add_req(always_branch);
3670       set_control(top());
3671       return always_branch;
3672     }
3673   }
3674   unsigned int value = 0;
3675   BoolTest::mask btest = BoolTest::illegal;
3676   switch(kind) {
3677     case ObjectArray:
3678     case NonObjectArray: {
3679       value = Klass::_lh_array_tag_obj_value;
3680       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
3681       btest = (kind == ObjectArray) ? BoolTest::eq : BoolTest::ne;
3682       break;
3683     }
3684     case TypeArray: {
3685       value = Klass::_lh_array_tag_type_value;
3686       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
3687       btest = BoolTest::eq;
3688       break;
3689     }
3690     case FlatArray:
3691     case NonFlatArray: {
3692       value = 0;
3693       layout_val = _gvn.transform(new AndINode(layout_val, intcon(Klass::_lh_array_tag_vt_value_bit_inplace)));
3694       btest = (kind == FlatArray) ? BoolTest::ne : BoolTest::eq;
3695       break;
3696     }
3697     case AnyArray:    value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
3698     case NonArray:    value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
3699     default:
3700       ShouldNotReachHere();
3701   }
3702   // Now test the correct condition.
3703   jint nval = (jint)value;
3704   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
3705   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
3706   return generate_fair_guard(bol, region);
3707 }
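// Illustrative sketch (not from the original file): the shape of the layout-helper
// tests emitted above, over plain integers. The constants are placeholders chosen
// for the example, not the real Klass::_lh_* values.
namespace layout_helper_sketch {
  const int kTagShift       = 30;        // assumed position of the array tag
  const int kObjArrayTag    = -2;        // assumed tag value after the arithmetic shift
  const int kTypeArrayTag   = -1;        // assumed tag value after the arithmetic shift
  const int kFlatBitInPlace = 1 << 29;   // assumed in-place "flat array" bit
  const int kNeutralValue   = 0;         // arrays have a negative layout helper

  inline bool is_array(int lh)      { return lh < kNeutralValue; }                  // BoolTest::lt
  inline bool is_obj_array(int lh)  { return (lh >> kTagShift) == kObjArrayTag; }   // tag compare
  inline bool is_type_array(int lh) { return (lh >> kTagShift) == kTypeArrayTag; }  // tag compare
  inline bool is_flat_array(int lh) { return (lh & kFlatBitInPlace) != 0; }         // BoolTest::ne
}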
3708 
3709 
3710 //-----------------------inline_native_newArray--------------------------
3711 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
3712 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
3713 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
3714   Node* mirror;
3715   Node* count_val;
3716   if (uninitialized) {
3717     mirror    = argument(1);
3718     count_val = argument(2);
3719   } else {
3720     mirror    = argument(0);
3721     count_val = argument(1);
3722   }
3723 
3724   mirror = null_check(mirror);
3725   // If mirror is dead, only null-path is taken.
3726   if (stopped())  return true;
3727 
3728   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3729   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3730   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
3731   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);

3836   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3837   { PreserveReexecuteState preexecs(this);
3838     jvms()->set_should_reexecute(true);
3839 
3840     array_type_mirror = null_check(array_type_mirror);
3841     original          = null_check(original);
3842 
3843     // Check if a null path was taken unconditionally.
3844     if (stopped())  return true;
3845 
3846     Node* orig_length = load_array_length(original);
3847 
3848     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3849     klass_node = null_check(klass_node);
3850 
3851     RegionNode* bailout = new RegionNode(1);
3852     record_for_igvn(bailout);
3853 
3854     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3855     // Bail out if that is so.
3856     // An inline type array may have object fields that would require a
3857     // write barrier. Conservatively, go to the slow path.
3858     // TODO 8251971: Optimize for the case when flat src/dst are later found
3859     // to not contain oops (i.e., move this check to the macro expansion phase).
3860     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
3861     const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
3862     ciKlass* klass = _gvn.type(klass_node)->is_klassptr()->klass();
3863     bool exclude_flat = UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
3864                         // Can src array be flat and contain oops?
3865                         (orig_t == NULL || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
3866                         // Can dest array be flat and contain oops?
3867                         klass->can_be_inline_array_klass() && (!klass->is_flat_array_klass() || klass->as_flat_array_klass()->element_klass()->as_inline_klass()->contains_oops());
3868     Node* not_objArray = exclude_flat ? generate_non_objArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
3869     if (not_objArray != NULL) {
3870       // Improve the klass node's type from the new optimistic assumption:
3871       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3872       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
3873       Node* cast = new CastPPNode(klass_node, akls);
3874       cast->init_req(0, control());
3875       klass_node = _gvn.transform(cast);
3876     }
3877 
3878     Node* original_kls = load_object_klass(original);
3879     // ArrayCopyNode::Ideal may transform the ArrayCopyNode to
3880     // loads/stores but it is legal only if we're sure the
3881     // Arrays.copyOf would succeed. So we need all input arguments
3882     // to the copyOf to be validated, including that the copy to the
3883     // new array won't trigger an ArrayStoreException. That subtype
3884     // check can be optimized if we know something on the type of
3885     // the input array from type speculation.
3886     if (_gvn.type(klass_node)->singleton() && !stopped()) {
3887       ciKlass* subk   = _gvn.type(original_kls)->is_klassptr()->klass();
3888       ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
3889 
3890       int test = C->static_subtype_check(superk, subk);
3891       if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
3892         const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
3893         if (t_original->speculative_type() != NULL) {
3894           original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
3895           original_kls = load_object_klass(original);
3896         }
3897       }
3898     }
3899 
3900     // Bail out if either start or end is negative.
3901     generate_negative_guard(start, bailout, &start);
3902     generate_negative_guard(end,   bailout, &end);
3903 
3904     Node* length = end;
3905     if (_gvn.type(start) != TypeInt::ZERO) {
3906       length = _gvn.transform(new SubINode(end, start));
3907     }
3908 
3909     // Bail out if length is negative.
3910     // Without this, new_array would throw
3911     // NegativeArraySizeException, but IllegalArgumentException is what
3912     // should be thrown.
3913     generate_negative_guard(length, bailout, &length);
3914 
3915     // Handle inline type arrays
3916     bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
3917     if (!stopped()) {
3918       orig_t = _gvn.type(original)->isa_aryptr();
3919       if (orig_t != NULL && orig_t->is_flat()) {
3920         // Src is flat, check that dest is flat as well
3921         if (exclude_flat) {
3922           // Dest can't be flat, bail out
3923           bailout->add_req(control());
3924           set_control(top());
3925         } else {
3926           generate_non_flatArray_guard(klass_node, bailout);
3927         }
3928       } else if (UseFlatArray && (orig_t == NULL || !orig_t->is_not_flat()) &&
3929                  // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
3930                  ((!klass->is_flat_array_klass() && klass->can_be_inline_array_klass()) || !can_validate)) {
3931         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
3932         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
3933         generate_flatArray_guard(original_kls, bailout);
3934         if (orig_t != NULL) {
3935           orig_t = orig_t->cast_to_not_flat();
3936           original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
3937         }
3938       }
3939       if (!can_validate) {
3940         // No validation. The subtype check emitted at macro expansion time will not go to the slow
3941         // path but call checkcast_arraycopy, which cannot handle flat/null-free inline type arrays.
3942         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
3943         generate_fair_guard(null_free_array_test(klass_node), bailout);
3944       }
3945     }
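// Illustrative sketch (not from the original file): a simplified view of the
// flatness cases handled above. It ignores the can_be_inline_array_klass
// refinement in the real code; the enums and helper are invented for illustration.
enum class Flatness  { Flat, NotFlat, Unknown };
enum class FlatGuard { None, BailOut, DestMustBeFlat, SrcMustNotBeFlat };

static FlatGuard choose_flat_guard(Flatness src, bool dest_can_be_flat,
                                   bool dest_known_flat, bool can_validate) {
  if (src == Flatness::Flat) {
    // Src is flat, so dest has to be flat as well: trap if it is not, or bail out
    // when dest cannot possibly be flat.
    return dest_can_be_flat ? FlatGuard::DestMustBeFlat : FlatGuard::BailOut;
  }
  if (src == Flatness::Unknown && (!dest_known_flat || !can_validate)) {
    // Src might be flat while dest might not be: take the slow path if src turns out flat.
    return FlatGuard::SrcMustNotBeFlat;
  }
  return FlatGuard::None;  // a null-free check may still be needed when !can_validate
}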
3946 
3947     if (bailout->req() > 1) {
3948       PreserveJVMState pjvms(this);
3949       set_control(_gvn.transform(bailout));
3950       uncommon_trap(Deoptimization::Reason_intrinsic,
3951                     Deoptimization::Action_maybe_recompile);
3952     }
3953 
3954     if (!stopped()) {
3955       // How many elements will we copy from the original?
3956       // The answer is MinI(orig_length - start, length).
3957       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
3958       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
3959 
3960       // Generate a direct call to the right arraycopy function(s).
3961       // We know the copy is disjoint but we might not know if the
3962       // oop stores need checking.
3963       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
3964       // This will fail a store-check if x contains any non-nulls.
3965 
3966       bool validated = false;
3967       // Reason_class_check rather than Reason_intrinsic because we
3968       // want to intrinsify even if this traps.
3969       if (can_validate) {
3970         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
3971 
3972         if (not_subtype_ctrl != top()) {
3973           PreserveJVMState pjvms(this);
3974           set_control(not_subtype_ctrl);
3975           uncommon_trap(Deoptimization::Reason_class_check,
3976                         Deoptimization::Action_make_not_entrant);
3977           assert(stopped(), "Should be stopped");
3978         }
3979         validated = true;
3980       }
3981 
3982       if (!stopped()) {
3983         newcopy = new_array(klass_node, length, 0);  // no arguments to push
3984 
3985         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
3986                                                 original_kls, klass_node);
3987         if (!is_copyOfRange) {
3988           ac->set_copyof(validated);
3989         } else {
3990           ac->set_copyofrange(validated);
3991         }
3992         Node* n = _gvn.transform(ac);
3993         if (n == ac) {
3994           ac->connect_outputs(this);
3995         } else {
3996           assert(validated, "shouldn't transform unless all arguments are validated");
3997           set_all_memory(n);
3998         }
3999       }
4000     }
4001   } // original reexecute is set back here
4002 
4003   C->set_has_split_ifs(true); // Has chance for split-if optimization
4004   if (!stopped()) {
4005     set_result(newcopy);
4006   }
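// Illustrative sketch (not from the original file): the element count computed
// above for Arrays.copyOf/copyOfRange, assuming the guards already rejected
// negative start, end and length. The helper name is invented for illustration.
static int copyof_moved_elements(int orig_length, int start, int length) {
  int orig_tail = orig_length - start;            // elements still available in the source
  return orig_tail < length ? orig_tail : length; // MinI(orig_tail, length)
}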

4088   set_edges_for_java_call(slow_call);
4089   return slow_call;
4090 }
4091 
4092 
4093 /**
4094  * Build special case code for calls to hashCode on an object. This call may
4095  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4096  * slightly different code.
4097  */
4098 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4099   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4100   assert(!(is_virtual && is_static), "either virtual, special, or static");
4101 
4102   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4103 
4104   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4105   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4106   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4107   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4108   Node* obj = argument(0);
4109 
4110   if (obj->is_InlineType() || gvn().type(obj)->is_inlinetypeptr()) {
4111     return false;
4112   }
4113 
4114   if (!is_static) {
4115     // Check for hashing null object
4116     obj = null_check_receiver();
4117     if (stopped())  return true;        // unconditionally null
4118     result_reg->init_req(_null_path, top());
4119     result_val->init_req(_null_path, top());
4120   } else {
4121     // Do a null check, and return zero if null.
4122     // System.identityHashCode(null) == 0

4123     Node* null_ctl = top();
4124     obj = null_check_oop(obj, &null_ctl);
4125     result_reg->init_req(_null_path, null_ctl);
4126     result_val->init_req(_null_path, _gvn.intcon(0));
4127   }
4128 
4129   // Unconditionally null?  Then return right away.
4130   if (stopped()) {
4131     set_control( result_reg->in(_null_path));
4132     if (!stopped())
4133       set_result(result_val->in(_null_path));
4134     return true;
4135   }
4136 
4137   // We only go to the fast case code if we pass a number of guards.  The
4138   // paths which do not pass are accumulated in the slow_region.
4139   RegionNode* slow_region = new RegionNode(1);
4140   record_for_igvn(slow_region);
4141 
4142   // If this is a virtual call, we generate a funny guard.  We pull out
4143   // the vtable entry corresponding to hashCode() from the target object.
4144   // If the target method which we are calling happens to be the native
4145   // Object hashCode() method, we pass the guard.  We do not need this
4146   // guard for non-virtual calls -- the caller is known to be the native
4147   // Object hashCode().
4148   if (is_virtual) {
4149     // After null check, get the object's klass.
4150     Node* obj_klass = load_object_klass(obj);
4151     generate_virtual_guard(obj_klass, slow_region);
4152   }
4153 
4154   // Get the header out of the object, use LoadMarkNode when available
4155   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4156   // The control of the load must be NULL. Otherwise, the load can move before
4157   // the null check after castPP removal.
4158   Node* no_ctrl = NULL;
4159   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4160 
4161   // Test the header to see if it is unlocked.
4162   // This also serves as guard against inline types
4163   Node *lock_mask      = _gvn.MakeConX(markWord::inline_type_mask_in_place);
4164   Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4165   Node *unlocked_val   = _gvn.MakeConX(markWord::unlocked_value);
4166   Node *chk_unlocked   = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
4167   Node *test_unlocked  = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
4168 
4169   generate_slow_guard(test_unlocked, slow_region);
4170 
4171   // Get the hash value and check to see that it has been properly assigned.
4172   // We depend on hash_mask being at most 32 bits and avoid the use of
4173   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4174   // vm: see markWord.hpp.
4175   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
4176   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
4177   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4178   // This hack lets the hash bits live anywhere in the mark object now, as long
4179   // as the shift drops the relevant bits into the low 32 bits.  Note that
4180   // Java spec says that HashCode is an int so there's no point in capturing
4181   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4182   hshifted_header      = ConvX2I(hshifted_header);
4183   Node *hash_val       = _gvn.transform(new AndINode(hshifted_header, hash_mask));

4209     // this->control() comes from set_results_for_java_call
4210     result_reg->init_req(_slow_path, control());
4211     result_val->init_req(_slow_path, slow_result);
4212     result_io  ->set_req(_slow_path, i_o());
4213     result_mem ->set_req(_slow_path, reset_memory());
4214   }
4215 
4216   // Return the combined state.
4217   set_i_o(        _gvn.transform(result_io)  );
4218   set_all_memory( _gvn.transform(result_mem));
4219 
4220   set_result(result_reg, result_val);
4221   return true;
4222 }
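// Illustrative sketch (not from the original file): the fast-path test and hash
// extraction performed on the mark word above. The bit layout below is a
// placeholder invented for the example; the real masks and shifts are defined in
// markWord.hpp.
namespace mark_word_sketch {
  const unsigned long kLockMask      = 0x7;         // assumed lock/inline-type bits
  const unsigned long kUnlockedValue = 0x1;         // assumed "unlocked" pattern
  const int           kHashShift     = 8;           // assumed position of the hash field
  const unsigned int  kHashMask      = 0x7FFFFFFF;  // assumed width of the hash field

  // Returns true and the installed identity hash when the fast path applies;
  // returns false when the object is locked, is an inline type, or has no hash
  // installed yet, in which case the slow call is taken.
  inline bool fast_identity_hash(unsigned long mark, int& hash) {
    if ((mark & kLockMask) != kUnlockedValue) return false;        // locked or inline type
    hash = (int)((unsigned int)(mark >> kHashShift) & kHashMask);  // shift, then 32-bit mask
    return hash != 0;                                              // 0 stands in for "no hash yet"
  }
}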
4223 
4224 //---------------------------inline_native_getClass----------------------------
4225 // public final native Class<?> java.lang.Object.getClass();
4226 //
4227 // Build special case code for calls to getClass on an object.
4228 bool LibraryCallKit::inline_native_getClass() {
4229   Node* obj = argument(0);
4230   if (obj->is_InlineTypeBase()) {
4231     const Type* t = _gvn.type(obj);
4232     if (t->maybe_null()) {
4233       null_check(obj);
4234     }
4235     set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
4236     return true;
4237   }
4238   obj = null_check_receiver();
4239   if (stopped())  return true;
4240   set_result(load_mirror_from_klass(load_object_klass(obj)));
4241   return true;
4242 }
4243 
4244 //-----------------inline_native_Reflection_getCallerClass---------------------
4245 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4246 //
4247 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4248 //
4249 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4250 // in that it must skip particular security frames and checks for
4251 // caller sensitive methods.
4252 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4253 #ifndef PRODUCT
4254   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4255     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4256   }
4257 #endif
4258 

4556 //  not cloneable or finalizer => slow path to out-of-line Object.clone
4557 //
4558 // The general case has two steps, allocation and copying.
4559 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4560 //
4561 // Copying also has two cases, oop arrays and everything else.
4562 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4563 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4564 //
4565 // These steps fold up nicely if and when the cloned object's klass
4566 // can be sharply typed as an object array, a type array, or an instance.
4567 //
4568 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4569   PhiNode* result_val;
4570 
4571   // Set the reexecute bit for the interpreter to reexecute
4572   // the bytecode that invokes Object.clone if deoptimization happens.
4573   { PreserveReexecuteState preexecs(this);
4574     jvms()->set_should_reexecute(true);
4575 
4576     Node* obj = argument(0);
4577     if (obj->is_InlineType()) {
4578       return false;
4579     }
4580 
4581     obj = null_check_receiver();
4582     if (stopped())  return true;
4583 
4584     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4585 
4586     // If we are going to clone an instance, we need its exact type to
4587     // know the number and types of fields to convert the clone to
4588     // loads/stores. Maybe a speculative type can help us.
4589     if (!obj_type->klass_is_exact() &&
4590         obj_type->speculative_type() != NULL &&
4591         obj_type->speculative_type()->is_instance_klass() &&
4592         !obj_type->speculative_type()->is_inlinetype()) {
4593       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
4594       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
4595           !spec_ik->has_injected_fields()) {
4596         ciKlass* k = obj_type->klass();
4597         if (!k->is_instance_klass() ||
4598             k->as_instance_klass()->is_interface() ||
4599             k->as_instance_klass()->has_subklass()) {
4600           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
4601         }
4602       }
4603     }
4604 
4605     // Conservatively insert a memory barrier on all memory slices.
4606     // Do not let writes into the original float below the clone.
4607     insert_mem_bar(Op_MemBarCPUOrder);
4608 
4609     // paths into result_reg:
4610     enum {
4611       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
4612       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
4613       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
4614       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
4615       PATH_LIMIT
4616     };
4617     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4618     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4619     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
4620     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4621     record_for_igvn(result_reg);
4622 
4623     Node* obj_klass = load_object_klass(obj);
4624     // We only go to the fast case code if we pass a number of guards.
4625     // The paths which do not pass are accumulated in the slow_region.
4626     RegionNode* slow_region = new RegionNode(1);
4627     record_for_igvn(slow_region);
4628 
4629     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4630     if (array_ctl != NULL) {
4631       // It's an array.
4632       PreserveJVMState pjvms(this);
4633       set_control(array_ctl);
4634 
4635       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4636       const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
4637       if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
4638           obj_type->klass()->can_be_inline_array_klass() &&
4639           (ary_ptr == NULL || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
4640         // A flattened inline type array may have object fields that would require a
4641         // write barrier. Conservatively, go to the slow path.
4642         generate_flatArray_guard(obj_klass, slow_region);
4643       }
4644 
4645       if (!stopped()) {
4646         Node* obj_length = load_array_length(obj);
4647         Node* obj_size  = NULL;
4648         Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true);
4649 
4650         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4651         if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
4652           // If it is an oop array, it requires very special treatment,
4653           // because gc barriers are required when accessing the array.
4654           Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4655           if (is_obja != NULL) {
4656             PreserveJVMState pjvms2(this);
4657             set_control(is_obja);
4658             // Generate a direct call to the right arraycopy function(s).
4659             // Clones are always tightly coupled.
4660             ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
4661             ac->set_clone_oop_array();
4662             Node* n = _gvn.transform(ac);
4663             assert(n == ac, "cannot disappear");
4664             ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
4665 
4666             result_reg->init_req(_objArray_path, control());
4667             result_val->init_req(_objArray_path, alloc_obj);
4668             result_i_o ->set_req(_objArray_path, i_o());
4669             result_mem ->set_req(_objArray_path, reset_memory());
4670           }
4671         }
4672         // Otherwise, there are no barriers to worry about.
4673         // (We can dispense with card marks if we know the allocation
4674         //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4675         //  causes the non-eden paths to take compensating steps to
4676         //  simulate a fresh allocation, so that no further
4677         //  card marks are required in compiled code to initialize
4678         //  the object.)
4679 
4680         if (!stopped()) {
4681           copy_to_clone(obj, alloc_obj, obj_size, true);
4682 
4683           // Present the results of the copy.
4684           result_reg->init_req(_array_path, control());
4685           result_val->init_req(_array_path, alloc_obj);
4686           result_i_o ->set_req(_array_path, i_o());
4687           result_mem ->set_req(_array_path, reset_memory());
4688         }
4689       }
4690     }
4691 
4692     if (!stopped()) {
4693       // It's an instance (we did array above).  Make the slow-path tests.
4694       // If this is a virtual call, we generate a funny guard.  We grab
4695       // the vtable entry corresponding to clone() from the target object.
4696       // If the target method which we are calling happens to be the
4697       // Object clone() method, we pass the guard.  We do not need this
4698       // guard for non-virtual calls; the caller is known to be the native
4699       // Object clone().
4700       if (is_virtual) {
4701         generate_virtual_guard(obj_klass, slow_region);
4702       }
4703 
4704       // The object must be easily cloneable and must not have a finalizer.
4705       // Both of these conditions may be checked in a single test.
4706       // We could optimize the test further, but we don't care.
4707       generate_access_flags_guard(obj_klass,
4708                                   // Test both conditions:
4709                                   JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
4710                                   // Must be cloneable but not finalizer:
4711                                   JVM_ACC_IS_CLONEABLE_FAST,

4832 // array in the heap that GCs wouldn't expect. Move the allocation
4833 // after the traps so we don't allocate the array if we
4834 // deoptimize. This is possible because tightly_coupled_allocation()
4835 // guarantees there's no observer of the allocated array at this point
4836 // and the control flow is simple enough.
4837 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms,
4838                                                     int saved_reexecute_sp, uint new_idx) {
4839   if (saved_jvms != NULL && !stopped()) {
4840     assert(alloc != NULL, "only with a tightly coupled allocation");
4841     // restore JVM state to the state at the arraycopy
4842     saved_jvms->map()->set_control(map()->control());
4843     assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
4844     assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
4845     // If we've improved the types of some nodes (null check) while
4846     // emitting the guards, propagate them to the current state
4847     map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
4848     set_jvms(saved_jvms);
4849     _reexecute_sp = saved_reexecute_sp;
4850 
4851     // Remove the allocation from above the guards
4852     CallProjections* callprojs = alloc->extract_projections(true);

4853     InitializeNode* init = alloc->initialization();
4854     Node* alloc_mem = alloc->in(TypeFunc::Memory);
4855     C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
4856     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
4857 
4858     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
4859     // the allocation (i.e. is only valid if the allocation succeeds):
4860     // 1) replace CastIINode with AllocateArrayNode's length here
4861     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
4862     //
4863     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
4864     // a new separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy).
4865     Node* init_control = init->proj_out(TypeFunc::Control);
4866     Node* alloc_length = alloc->Ideal_length();
4867 #ifdef ASSERT
4868     Node* prev_cast = NULL;
4869 #endif
4870     for (uint i = 0; i < init_control->outcnt(); i++) {
4871       Node* init_out = init_control->raw_out(i);
4872       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
4873 #ifdef ASSERT
4874         if (prev_cast == NULL) {
4875           prev_cast = init_out;

4877           if (prev_cast->cmp(*init_out) == false) {
4878             prev_cast->dump();
4879             init_out->dump();
4880             assert(false, "not equal CastIINode");
4881           }
4882         }
4883 #endif
4884         C->gvn_replace_by(init_out, alloc_length);
4885       }
4886     }
4887     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
4888 
4889     // move the allocation here (after the guards)
4890     _gvn.hash_delete(alloc);
4891     alloc->set_req(TypeFunc::Control, control());
4892     alloc->set_req(TypeFunc::I_O, i_o());
4893     Node *mem = reset_memory();
4894     set_all_memory(mem);
4895     alloc->set_req(TypeFunc::Memory, mem);
4896     set_control(init->proj_out_or_null(TypeFunc::Control));
4897     set_i_o(callprojs->fallthrough_ioproj);
4898 
4899     // Update memory as done in GraphKit::set_output_for_allocation()
4900     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
4901     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
4902     if (ary_type->isa_aryptr() && length_type != NULL) {
4903       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4904     }
4905     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
4906     int            elemidx  = C->get_alias_index(telemref);
4907     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
4908     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
4909 
4910     Node* allocx = _gvn.transform(alloc);
4911     assert(allocx == alloc, "where has the allocation gone?");
4912     assert(dest->is_CheckCastPP(), "not an allocation result?");
4913 
4914     _gvn.hash_delete(dest);
4915     dest->set_req(0, control());
4916     Node* destx = _gvn.transform(dest);
4917     assert(destx == dest, "where has the allocation result gone?");

5053       // Do we have the exact type of dest?
5054       bool could_have_dest = dest_spec;
5055       ciKlass* src_k = top_src->klass();
5056       ciKlass* dest_k = top_dest->klass();
5057       if (!src_spec) {
5058         src_k = src_type->speculative_type_not_null();
5059         if (src_k != NULL && src_k->is_array_klass()) {
5060           could_have_src = true;
5061         }
5062       }
5063       if (!dest_spec) {
5064         dest_k = dest_type->speculative_type_not_null();
5065         if (dest_k != NULL && dest_k->is_array_klass()) {
5066           could_have_dest = true;
5067         }
5068       }
5069       if (could_have_src && could_have_dest) {
5070         // If we can have both exact types, emit the missing guards
5071         if (could_have_src && !src_spec) {
5072           src = maybe_cast_profiled_obj(src, src_k, true);
5073           src_type = _gvn.type(src);
5074           top_src = src_type->isa_aryptr();
5075         }
5076         if (could_have_dest && !dest_spec) {
5077           dest = maybe_cast_profiled_obj(dest, dest_k, true);
5078           dest_type = _gvn.type(dest);
5079           top_dest = dest_type->isa_aryptr();
5080         }
5081       }
5082     }
5083   }
5084 
5085   ciMethod* trap_method = method();
5086   int trap_bci = bci();
5087   if (saved_jvms != NULL) {
5088     trap_method = alloc->jvms()->method();
5089     trap_bci = alloc->jvms()->bci();
5090   }
5091 
5092   bool negative_length_guard_generated = false;
5093 
5094   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5095       can_emit_guards && !src->is_top() && !dest->is_top()) {

5096     // validate arguments: enables transformation the ArrayCopyNode
5097     validated = true;
5098 
5099     RegionNode* slow_region = new RegionNode(1);
5100     record_for_igvn(slow_region);
5101 
5102     // (1) src and dest are arrays.
5103     generate_non_array_guard(load_object_klass(src), slow_region);
5104     generate_non_array_guard(load_object_klass(dest), slow_region);
5105 
5106     // (2) src and dest arrays must have elements of the same BasicType
5107     // done at macro expansion or at Ideal transformation time
5108 
5109     // (4) src_offset must not be negative.
5110     generate_negative_guard(src_offset, slow_region);
5111 
5112     // (5) dest_offset must not be negative.
5113     generate_negative_guard(dest_offset, slow_region);
5114 
5115     // (7) src_offset + length must not exceed length of src.

5118                          slow_region);
5119 
5120     // (8) dest_offset + length must not exceed length of dest.
5121     generate_limit_guard(dest_offset, length,
5122                          load_array_length(dest),
5123                          slow_region);
5124 
5125     // (6) length must not be negative.
5126     // This is also checked in generate_arraycopy() during macro expansion, but
5127     // we also have to check it here for the case where the ArrayCopyNode will
5128     // be eliminated by Escape Analysis.
5129     if (EliminateAllocations) {
5130       generate_negative_guard(length, slow_region);
5131       negative_length_guard_generated = true;
5132     }
5133 
5134     // (9) each element of an oop array must be assignable
5135     Node* dest_klass = load_object_klass(dest);
5136     if (src != dest) {
5137       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
5138       slow_region->add_req(not_subtype_ctrl);
5139     }
5140 
5141     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5142     const Type* toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
5143     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5144     src_type = _gvn.type(src);
5145     top_src  = src_type->isa_aryptr();
5146 
5147     // Handle flat inline type arrays (null-free arrays are handled by the subtype check above)
5148     if (!stopped() && UseFlatArray) {
5149       // If dest is flat, src must be flat as well (guaranteed by src <: dest check). Handle flat src here.
5150       assert(top_dest == NULL || !top_dest->is_flat() || top_src->is_flat(), "src array must be flat");
5151       if (top_src != NULL && top_src->is_flat()) {
5152         // Src is flat, check that dest is flat as well
5153         if (top_dest != NULL && !top_dest->is_flat()) {
5154           generate_non_flatArray_guard(dest_klass, slow_region);
5155           // Since dest is flat and src <: dest, dest must have the same type as src.
5156           top_dest = TypeOopPtr::make_from_klass(top_src->klass())->isa_aryptr();
5157           assert(top_dest->is_flat(), "dest must be flat");
5158           dest = _gvn.transform(new CheckCastPPNode(control(), dest, top_dest));
5159         }
5160       } else if (top_src == NULL || !top_src->is_not_flat()) {
5161         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
5162         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
5163         assert(top_dest == NULL || !top_dest->is_flat(), "dest array must not be flat");
5164         generate_flatArray_guard(load_object_klass(src), slow_region);
5165         if (top_src != NULL) {
5166           top_src = top_src->cast_to_not_flat();
5167           src = _gvn.transform(new CheckCastPPNode(control(), src, top_src));
5168         }
5169       }
5170     }
5171 
5172     {
5173       PreserveJVMState pjvms(this);
5174       set_control(_gvn.transform(slow_region));
5175       uncommon_trap(Deoptimization::Reason_intrinsic,
5176                     Deoptimization::Action_make_not_entrant);
5177       assert(stopped(), "Should be stopped");
5178     }
5179   }
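// Illustrative sketch (not from the original file): the argument checks (4)-(8)
// emitted above, written as plain arithmetic. The 64-bit sums stand in for the
// overflow-safe limit guards; the helper name is invented for illustration.
static bool arraycopy_args_in_range(int src_len, int src_offset,
                                    int dest_len, int dest_offset, int length) {
  if (src_offset < 0 || dest_offset < 0 || length < 0) return false;      // (4), (5), (6)
  if ((long long)src_offset  + length > src_len)       return false;      // (7)
  if ((long long)dest_offset + length > dest_len)      return false;      // (8)
  return true;  // (1), (2) and (9) are covered by separate guards above
}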
5180 
5181   arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
5182 
5183   if (stopped()) {
5184     return true;
5185   }
5186 
5187   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL, negative_length_guard_generated,
5188                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
5189                                           // so the compiler has a chance to eliminate them: during macro expansion,
5190                                           // we have to set their control (CastPP nodes are eliminated).
5191                                           load_object_klass(src), load_object_klass(dest),
5192                                           load_array_length(src), load_array_length(dest));
5193 
5194   ac->set_arraycopy(validated);
5195 
5196   Node* n = _gvn.transform(ac);
5197   if (n == ac) {
5198     ac->connect_outputs(this);